Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/cppc_acpi.c43
-rw-r--r--drivers/acpi/power.c7
-rw-r--r--drivers/acpi/pptt.c67
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/ata/libata-core.c57
-rw-r--r--drivers/ata/libata-scsi.c48
-rw-r--r--drivers/ata/sata_mv.c4
-rw-r--r--drivers/base/arch_topology.c15
-rw-r--r--drivers/base/regmap/regcache-rbtree.c7
-rw-r--r--drivers/base/regmap/regmap-spi.c36
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/block/Kconfig25
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c9
-rw-r--r--drivers/block/aoe/aoeblk.c19
-rw-r--r--drivers/block/ataflop.c110
-rw-r--r--drivers/block/brd.c12
-rw-r--r--drivers/block/cryptoloop.c206
-rw-r--r--drivers/block/drbd/drbd_int.h5
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/drbd/drbd_req.c3
-rw-r--r--drivers/block/floppy.c35
-rw-r--r--drivers/block/loop.c420
-rw-r--r--drivers/block/loop.h30
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c6
-rw-r--r--drivers/block/n64cart.c24
-rw-r--r--drivers/block/nbd.c176
-rw-r--r--drivers/block/null_blk/main.c195
-rw-r--r--drivers/block/null_blk/null_blk.h6
-rw-r--r--drivers/block/paride/pcd.c312
-rw-r--r--drivers/block/paride/pd.c148
-rw-r--r--drivers/block/paride/pf.c236
-rw-r--r--drivers/block/pktcdvd.c20
-rw-r--r--drivers/block/ps3vram.c6
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/rnbd-clt.c15
-rw-r--r--drivers/block/rnbd/rnbd-proto.h2
-rw-r--r--drivers/block/rsxx/core.c4
-rw-r--r--drivers/block/rsxx/dev.c19
-rw-r--r--drivers/block/swim.c36
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/sx8.c15
-rw-r--r--drivers/block/virtio_blk.c16
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c10
-rw-r--r--drivers/cdrom/cdrom.c63
-rw-r--r--drivers/cdrom/gdrom.c7
-rw-r--r--drivers/char/ipmi/Kconfig11
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/bt-bmc.c69
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c539
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c330
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c25
-rw-r--r--drivers/char/ipmi/kcs_bmc_serio.c4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm2-space.c3
-rw-r--r--drivers/char/tpm/tpm_tis_core.c26
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c1
-rw-r--r--drivers/clk/clk-composite.c10
-rw-r--r--drivers/clocksource/Kconfig3
-rw-r--r--drivers/clocksource/arc_timer.c6
-rw-r--r--drivers/clocksource/arm_arch_timer.c243
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h14
-rw-r--r--drivers/cxl/cxl.h61
-rw-r--r--drivers/dma/pxa_dma.c3
-rw-r--r--drivers/edac/al_mc_edac.c12
-rw-r--r--drivers/edac/amd64_edac.c22
-rw-r--r--drivers/edac/edac_mc.c42
-rw-r--r--drivers/edac/edac_mc_sysfs.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/edac/ti_edac.c7
-rw-r--r--drivers/firewire/core-cdev.c32
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/psci/psci_checker.c2
-rw-r--r--drivers/gpio/gpio-mlxbf2.c5
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c6
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c18
-rw-r--r--drivers/gpu/drm/drm_cache.c4
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h7
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c30
-rw-r--r--drivers/gpu/drm/kmb/kmb_crtc.c41
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h10
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c25
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.h2
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c43
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.h6
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c3
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c2
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c12
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c31
-rw-r--r--drivers/hid/hid-cp2112.c14
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-kone.h12
-rw-r--r--drivers/hv/hyperv_vmbus.h1
-rw-r--r--drivers/hwmon/xgene-hwmon.c35
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c33
-rw-r--r--drivers/infiniband/core/sa_query.c5
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/irdma/uk.c4
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c8
-rw-r--r--drivers/infiniband/hw/irdma/ws.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c33
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/iommu/amd/init.c16
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/amd/iommu_v2.c3
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/irqchip/Kconfig25
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c20
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c13
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c2
-rw-r--r--drivers/irqchip/irq-ativic32.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c2
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c6
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c47
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c21
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c16
-rw-r--r--drivers/irqchip/irq-clps711x.c8
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c2
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c4
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c2
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-ftintc010.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-ixp4xx.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-mchp-eic.c280
-rw-r--r--drivers/irqchip/irq-meson-gpio.c15
-rw-r--r--drivers/irqchip/irq-mips-gic.c37
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c4
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-nvic.c17
-rw-r--r--drivers/irqchip/irq-omap-intc.c2
-rw-r--r--drivers/irqchip/irq-or1k-pic.c2
-rw-r--r--drivers/irqchip/irq-orion.c4
-rw-r--r--drivers/irqchip/irq-rda-intc.c2
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/irqchip/irq-ts4800.c4
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irq-vt8500.c2
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c8
-rw-r--r--drivers/leds/led-class-flash.c2
-rw-r--r--drivers/leds/led-triggers.c41
-rw-r--r--drivers/leds/trigger/Kconfig1
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/apple-mailbox.c384
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c4
-rw-r--r--drivers/mailbox/hi3660-mailbox.c4
-rw-r--r--drivers/mailbox/hi6220-mailbox.c7
-rw-r--r--drivers/mailbox/imx-mailbox.c124
-rw-r--r--drivers/mailbox/mailbox-altera.c5
-rw-r--r--drivers/mailbox/mailbox-sti.c4
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c15
-rw-r--r--drivers/mailbox/omap-mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c598
-rw-r--r--drivers/mailbox/platform_mhu.c4
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c31
-rw-r--r--drivers/mailbox/stm32-ipcc.c4
-rw-r--r--drivers/mailbox/sun6i-msgbox.c9
-rw-r--r--drivers/md/bcache/bcache.h6
-rw-r--r--drivers/md/bcache/bcache_ondisk.h445
-rw-r--r--drivers/md/bcache/bset.h2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/debug.c15
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h3
-rw-r--r--drivers/md/bcache/io.c16
-rw-r--r--drivers/md/bcache/request.c19
-rw-r--r--drivers/md/bcache/request.h4
-rw-r--r--drivers/md/bcache/super.c91
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/sysfs.h18
-rw-r--r--drivers/md/bcache/util.h29
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-crypt.c1
-rw-r--r--drivers/md/dm-dust.c5
-rw-r--r--drivers/md/dm-ebs-target.c2
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-flakey.c3
-rw-r--r--drivers/md/dm-ima.c1
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-ps-historical-service-time.c1
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c172
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c42
-rw-r--r--drivers/md/md.c130
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/cec/Kconfig4
-rw-r--r--drivers/media/cec/core/cec-pin.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c4
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c4
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c4
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c149
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c198
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c39
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c59
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c30
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c9
-rw-r--r--drivers/media/dvb-frontends/cxd2099.h9
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_priv.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c4
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c18
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h10
-rw-r--r--drivers/media/dvb-frontends/mxl692.c9
-rw-r--r--drivers/media/dvb-frontends/mxl692.h9
-rw-r--r--drivers/media/dvb-frontends/mxl692_defs.h9
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/dvb-frontends/stv0910.c9
-rw-r--r--drivers/media/dvb-frontends/stv0910.h9
-rw-r--r--drivers/media/dvb-frontends/stv6111.c9
-rw-r--r--drivers/media/dvb-frontends/stv6111.h9
-rw-r--r--drivers/media/firewire/firedtv-avc.c14
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/i2c/Kconfig27
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7604.c15
-rw-r--r--drivers/media/i2c/dw9714.c14
-rw-r--r--drivers/media/i2c/hi846.c2190
-rw-r--r--drivers/media/i2c/imx258.c12
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/max9286.c17
-rw-r--r--drivers/media/i2c/mt9p031.c80
-rw-r--r--drivers/media/i2c/ov13858.c11
-rw-r--r--drivers/media/i2c/ov13b10.c1491
-rw-r--r--drivers/media/i2c/ov5670.c11
-rw-r--r--drivers/media/i2c/ov8856.c83
-rw-r--r--drivers/media/i2c/st-mipid02.c22
-rw-r--r--drivers/media/i2c/tda1997x.c131
-rw-r--r--drivers/media/i2c/tda1997x_regs.h3
-rw-r--r--drivers/media/i2c/video-i2c.c21
-rw-r--r--drivers/media/mc/Kconfig8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c4
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c13
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c4
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c60
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h9
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c274
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c18
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c22
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c19
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c10
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c29
-rw-r--r--drivers/media/pci/pluto2/pluto2.c20
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c53
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c29
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c2
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c2
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c311
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c23
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h10
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c74
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.h200
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c202
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h189
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c23
-rw-r--r--drivers/media/platform/aspeed-video.c133
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c29
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h2
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c17
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c54
-rw-r--r--drivers/media/platform/atmel/atmel-sama7g5-isc.c37
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c18
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c4
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c3
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c9
-rw-r--r--drivers/media/platform/davinci/vpif.c5
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c21
-rw-r--r--drivers/media/platform/davinci/vpss.c10
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c3
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c20
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.c109
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.h2
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c9
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c10
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c6
-rw-r--r--drivers/media/platform/meson/ge2d/ge2d.c10
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c820
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h27
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c65
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c628
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c360
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h59
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c148
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c75
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c774
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h23
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c43
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h5
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c5
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c18
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap3isp/isp.c21
-rw-r--r--drivers/media/platform/pxa_camera.c26
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.c135
-rw-r--r--drivers/media/platform/qcom/venus/core.h9
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c81
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c48
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h14
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c13
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c13
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c67
-rw-r--r--drivers/media/platform/qcom/venus/venc.c116
-rw-r--r--drivers/media/platform/rcar-isp.c515
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c959
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c241
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c40
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c25
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h25
-rw-r--r--drivers/media/platform/rcar_drif.c17
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/renesas-ceu.c33
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h44
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c98
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c29
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c557
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h406
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c107
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c6
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c9
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c1
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c37
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c16
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c33
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c8
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c16
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/video-mux.c17
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h11
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c17
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/rc/Kconfig8
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c4
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir_toy.c63
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/meson-ir-tx.c1
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c4
-rw-r--r--drivers/media/rc/sir_ir.c438
-rw-r--r--drivers/media/rc/st_rc.c5
-rw-r--r--drivers/media/rc/streamzap.c1
-rw-r--r--drivers/media/rc/sunxi-cir.c4
-rw-r--r--drivers/media/spi/cxd2880-spi.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c4
-rw-r--r--drivers/media/test-drivers/vim2m.c5
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c366
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c341
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.h9
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c52
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h23
-rw-r--r--drivers/media/tuners/mxl5007t.c9
-rw-r--r--drivers/media/tuners/tuner-types.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c16
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c1
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-mi1320.c87
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-ov9655.c169
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov7660.h1
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c22
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.c25
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c11
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c10
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c260
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c16
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c103
-rw-r--r--drivers/media/usb/uvc/uvc_video.c5
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h17
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c168
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c83
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/memstick/core/ms_block.c8
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/memstick/host/jmb38x_ms.c5
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/mmc/core/block.c27
-rw-r--r--drivers/mmc/core/crypto.c11
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c1
-rw-r--r--drivers/mmc/core/slot-gpio.c42
-rw-r--r--drivers/mmc/host/Kconfig10
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/cqhci-core.c7
-rw-r--r--drivers/mmc/host/cqhci-crypto.c33
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c26
-rw-r--r--drivers/mmc/host/dw_mmc.c42
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c29
-rw-r--r--drivers/mmc/host/mtk-sd.c137
-rw-r--r--drivers/mmc/host/mxs-mmc.c10
-rw-r--r--drivers/mmc/host/omap_hsmmc.c12
-rw-r--r--drivers/mmc/host/sdhci-acpi.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c33
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c29
-rw-r--r--drivers/mmc/host/sdhci-omap.c322
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c159
-rw-r--r--drivers/mmc/host/sdhci-pci-data.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.h5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c1
-rw-r--r--drivers/mmc/host/sdhci-sprd.c13
-rw-r--r--drivers/mmc/host/sdhci.c48
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c17
-rw-r--r--drivers/mmc/host/vub300.c18
-rw-r--r--drivers/mtd/mtd_blkdevs.c6
-rw-r--r--drivers/mtd/mtdsuper.c1
-rw-r--r--drivers/net/can/flexcan.c68
-rw-r--r--drivers/net/can/m_can/m_can_platform.c14
-rw-r--r--drivers/net/can/rcar/rcar_can.c20
-rw-r--r--drivers/net/can/sja1000/peak_pci.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.h2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c8
-rw-r--r--drivers/net/dsa/lantiq_gswip.c2
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c31
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c148
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c25
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c4
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c37
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/phy/phy.c140
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h12
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/port100.c4
-rw-r--r--drivers/nfc/st95hf/core.c6
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/pmem.c36
-rw-r--r--drivers/nvme/host/core.c140
-rw-r--r--drivers/nvme/host/fabrics.c6
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c34
-rw-r--r--drivers/nvme/host/multipath.c54
-rw-r--r--drivers/nvme/host/nvme.h19
-rw-r--r--drivers/nvme/host/pci.c58
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/host/tcp.c29
-rw-r--r--drivers/nvme/host/zns.c2
-rw-r--r--drivers/nvme/target/admin-cmd.c18
-rw-r--r--drivers/nvme/target/configfs.c41
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/discovery.c19
-rw-r--r--drivers/nvme/target/fabrics-cmd.c3
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c5
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/loop.c6
-rw-r--r--drivers/nvme/target/nvmet.h6
-rw-r--r--drivers/nvme/target/rdma.c31
-rw-r--r--drivers/nvme/target/tcp.c23
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/perf/Kconfig12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/thunderx2_pmu.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c29
-rw-r--r--drivers/pinctrl/pinctrl-amd.c31
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_kvm_x86.c4
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/reset/Kconfig4
-rw-r--r--drivers/reset/reset-brcmstb-rescal.c2
-rw-r--r--drivers/reset/reset-socfpga.c26
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_3990_erp.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c294
-rw-r--r--drivers/s390/block/dasd_eckd.h13
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h4
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_bsg.c6
-rw-r--r--drivers/scsi/scsi_debug.c10
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_ioctl.c8
-rw-r--r--drivers/scsi/scsi_lib.c32
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c128
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c11
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/storvsc_drv.c32
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c32
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c33
-rw-r--r--drivers/scsi/ufs/ufshcd.c29
-rw-r--r--drivers/scsi/ufs/ufshcd.h6
-rw-r--r--drivers/scsi/ufs/ufshpb.c287
-rw-r--r--drivers/scsi/ufs/ufshpb.h2
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/spi/spi-altera-dfl.c2
-rw-r--r--drivers/spi/spi-altera-platform.c2
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c37
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c70
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c12
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_regs.h2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c3
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c21
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h4
-rw-r--r--drivers/staging/media/imx/TODO5
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c23
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c9
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c6
-rw-r--r--drivers/staging/media/imx/imx-media-of.c6
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c17
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c24
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c16
-rw-r--r--drivers/staging/media/imx/imx8mq-mipi-csi2.c16
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c19
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h1
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c13
-rw-r--r--drivers/staging/media/ipu3/ipu3.h12
-rw-r--r--drivers/staging/media/meson/vdec/esparser.h6
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c7
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h3
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c5
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c44
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c56
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c113
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c100
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c7
-rw-r--r--drivers/staging/media/tegra-video/vi.c17
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8712/ieee80211.h4
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c6
-rw-r--r--drivers/target/target_core_pscsi.c7
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c29
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c12
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/omap_wdt.c6
-rw-r--r--drivers/watchdog/sbsa_gwdt.c5
788 files changed, 20350 insertions(+), 8911 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index bd482108310c..a85c351589be 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -43,7 +43,7 @@
#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
- struct mbox_chan *pcc_channel;
+ struct pcc_mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
@@ -295,7 +295,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
- ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
+ ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
pcc_ss_id, cmd, ret);
@@ -308,10 +308,10 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
if (pcc_ss_data->pcc_mrtt)
pcc_ss_data->last_cmd_cmpl_time = ktime_get();
- if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
- mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
+ if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
+ mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
else
- mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
+ mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
end:
if (cmd == CMD_WRITE) {
@@ -493,46 +493,33 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
u64 usecs_lat;
if (pcc_ss_idx >= 0) {
- pcc_data[pcc_ss_idx]->pcc_channel =
- pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
+ pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
- if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
+ if (IS_ERR(pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
pcc_ss_idx);
return -ENODEV;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
-
- if (!cppc_ss) {
- pr_err("No PCC subspace found for %d CPPC\n",
- pcc_ss_idx);
- return -ENODEV;
- }
-
+ pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
/*
* cppc_ss->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ usecs_lat = NUM_RETRIES * pcc_chan->latency;
pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
- pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
- pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
- pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
+ pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
+ pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
+ pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+ acpi_os_ioremap(pcc_chan->shmem_base_addr,
+ pcc_chan->shmem_size);
if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
pcc_ss_idx);
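The hunk above converts CPPC from raw mbox_chan handles to the pcc_mbox_chan wrapper, which carries the shared-memory region and timing parameters that previously had to be dug out of con_priv. A minimal sketch of what a PCC client looks like against this interface; the function, client, and command value below are illustrative, only the pcc_mbox_chan fields and calls come from the patch:

#include <linux/acpi.h>
#include <linux/mailbox_client.h>
#include <acpi/pcc.h>

static struct mbox_client demo_pcc_client = {
	.knows_txdone = true,
};

static void __iomem *demo_shmem;

static int demo_pcc_setup(int subspace_id)
{
	struct pcc_mbox_chan *chan;
	u16 cmd = 0;	/* hypothetical doorbell command */
	int ret;

	/* One call now yields the mailbox channel plus the PCCT parameters. */
	chan = pcc_mbox_request_channel(&demo_pcc_client, subspace_id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* The shared-memory window comes from the wrapper, not from con_priv. */
	demo_shmem = acpi_os_ioremap(chan->shmem_base_addr, chan->shmem_size);
	if (!demo_shmem) {
		pcc_mbox_free_channel(chan);
		return -ENOMEM;
	}

	/* Doorbell rings still go through the underlying mbox_chan. */
	ret = mbox_send_message(chan->mchan, &cmd);
	return ret < 0 ? ret : 0;
}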
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b9863e22b952..f0ed4414edb1 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
- /*
- * Turn off power resources in an unknown state too, because the
- * platform firmware on some system expects the OS to turn off
- * power resources without any users unconditionally.
- */
if (!resource->ref_count &&
- resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+ resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
acpi_handle_debug(resource->device.handle, "Turning OFF\n");
__acpi_power_off(resource);
}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index fe69dc518f31..701f61c01359 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -747,6 +747,73 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
}
/**
+ * find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a topology unique cluster ID for the given CPU/thread.
+ * This ID can then be used to group peers, which will have matching ids.
+ *
+ * The cluster, if present, is the level of topology above CPUs. In a
+ * multi-thread CPU, it will be the level above the CPU, not the thread.
+ * It may not exist in single CPU systems. In simple multi-CPU systems,
+ * it may be equal to the package topology level.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found
+ * or there is no topology level above the CPU.
+ * Otherwise returns a value which represents the cluster for this CPU.
+ */
+
+int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ struct acpi_pptt_processor *cpu_node, *cluster_node;
+ u32 acpi_cpu_id;
+ int retval;
+ int is_thread;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_pptt_warn_missing();
+ return -ENOENT;
+ }
+
+ acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (cpu_node == NULL || !cpu_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+
+ is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
+ cluster_node = fetch_pptt_node(table, cpu_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ if (is_thread) {
+ if (!cluster_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ cluster_node = fetch_pptt_node(table, cluster_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ }
+ if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
+ retval = cluster_node->acpi_processor_id;
+ else
+ retval = ACPI_PTR_DIFF(cluster_node, table);
+
+put_table:
+ acpi_put_table(table);
+
+ return retval;
+}
+
+/**
* find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
* @cpu: Kernel logical CPU number
*
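For context, a sketch of how an architecture's topology code might consume the new helper when filling in struct cpu_topology; the surrounding function is illustrative, only find_acpi_cpu_topology_cluster() and the cluster_id field come from this series:

#include <linux/acpi.h>
#include <linux/arch_topology.h>

static void demo_store_acpi_cluster_id(unsigned int cpu)
{
	int cluster_id = find_acpi_cpu_topology_cluster(cpu);

	/* -ENOENT means no PPTT, or no topology level above the CPU. */
	if (cluster_id < 0)
		cluster_id = -1;

	cpu_topology[cpu].cluster_id = cluster_id;
}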
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f9383736fa0f..71419eb16e09 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -21,6 +21,7 @@
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <linux/security.h>
+#include <linux/kmemleak.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
*/
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+ kmemleak_ignore_phys(acpi_tables_addr);
+
/*
* early_ioremap only can remap 256k one time. If we map all
* tables one time, we will hit the limit. Need to map chunks
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index eed65311b5d1..75f1a6cd6621 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2459,18 +2459,70 @@ static void ata_dev_config_devslp(struct ata_device *dev)
}
}
+static void ata_dev_config_cpr(struct ata_device *dev)
+{
+ unsigned int err_mask;
+ size_t buf_len;
+ int i, nr_cpr = 0;
+ struct ata_cpr_log *cpr_log = NULL;
+ u8 *desc, *buf = NULL;
+
+ if (!ata_identify_page_supported(dev,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ goto out;
+
+ /*
+ * Read IDENTIFY DEVICE data log, page 0x47
+ * (concurrent positioning ranges). We can have at most 255 32B range
+ * descriptors plus a 64B header.
+ */
+ buf_len = (64 + 255 * 32 + 511) & ~511;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ buf, buf_len >> 9);
+ if (err_mask)
+ goto out;
+
+ nr_cpr = buf[0];
+ if (!nr_cpr)
+ goto out;
+
+ cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
+ if (!cpr_log)
+ goto out;
+
+ cpr_log->nr_cpr = nr_cpr;
+ desc = &buf[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ cpr_log->cpr[i].num = desc[0];
+ cpr_log->cpr[i].num_storage_elements = desc[1];
+ cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
+ cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
+ }
+
+out:
+ swap(dev->cpr_log, cpr_log);
+ kfree(cpr_log);
+ kfree(buf);
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
- dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
+ dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
+ dev->cpr_log ? " CPR" : "");
}
/**
@@ -2634,6 +2686,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
ata_dev_config_trusted(dev);
+ ata_dev_config_cpr(dev);
dev->cdb_len = 32;
if (ata_msg_drv(ap) && print_info)
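The buffer sizing in ata_dev_config_cpr() above is worth unpacking: the log page holds a 64-byte header plus at most 255 descriptors of 32 bytes each, i.e. 64 + 255 * 32 = 8224 bytes, and rounding up to the 512-byte sector size gives (8224 + 511) & ~511 = 8704 bytes, so buf_len >> 9 = 17 sectors are requested from ata_read_log_page().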
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1fb4611f7eeb..15a279f773c7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1895,7 +1895,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
- int num_pages;
+ int i, num_pages = 0;
static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
@@ -1905,13 +1905,17 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
0xb6, /* page 0xb6, zoned block device characteristics */
+ 0xb9, /* page 0xb9, concurrent positioning ranges */
};
- num_pages = sizeof(pages);
- if (!(args->dev->flags & ATA_DFLAG_ZAC))
- num_pages--;
+ for (i = 0; i < sizeof(pages); i++) {
+ if (pages[i] == 0xb6 &&
+ !(args->dev->flags & ATA_DFLAG_ZAC))
+ continue;
+ rbuf[num_pages + 4] = pages[i];
+ num_pages++;
+ }
rbuf[3] = num_pages; /* number of supported VPD pages */
- memcpy(rbuf + 4, pages, num_pages);
return 0;
}
@@ -2121,6 +2125,26 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
+static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ u8 *desc = &rbuf[64];
+ int i;
+
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
+
+ for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
+ desc[0] = cpr_log->cpr[i].num;
+ desc[1] = cpr_log->cpr[i].num_storage_elements;
+ put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
+ put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
+ }
+
+ return 0;
+}
+
/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
@@ -4120,11 +4144,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC) {
+ if (dev->flags & ATA_DFLAG_ZAC)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- break;
- }
- fallthrough;
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
+ case 0xb9:
+ if (dev->cpr_log)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9d86203e1e7a..c53633d47bfb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
- return 1;
+ dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
+ return -EINVAL;
}
hpriv->hp_flags = hp_flags;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 43407665918f..fc0836f460fb 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -600,6 +600,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return core_mask;
}
+const struct cpumask *cpu_clustergroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].cluster_sibling;
+}
+
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -617,6 +622,12 @@ void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
+ if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
+ cpuid_topo->cluster_id != -1) {
+ cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
+ }
+
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
@@ -635,6 +646,9 @@ static void clear_cpu_topology(int cpu)
cpumask_clear(&cpu_topo->llc_sibling);
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+ cpumask_clear(&cpu_topo->cluster_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
+
cpumask_clear(&cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
@@ -650,6 +664,7 @@ void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
+ cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
cpu_topo->llc_id = -1;
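A small illustrative consumer of the new cluster topology data; the function and its printout are hypothetical, while cpu_clustergroup_mask() and the cluster_sibling mask are what the hunks above introduce:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_dump_cluster_siblings(void)
{
	unsigned int cpu;

	/* Each CPU's mask contains every sibling sharing its cluster_id. */
	for_each_online_cpu(cpu)
		pr_info("CPU%u cluster siblings: %*pbl\n", cpu,
			cpumask_pr_args(cpu_clustergroup_mask(cpu)));
}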
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index cfa29dc89bbf..fabf87058d80 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
+ rbnode->block = blk;
+
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
GFP_KERNEL);
- if (!present) {
- kfree(blk);
+ if (!present)
return -ENOMEM;
- }
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
}
/* update the rbnode block, its size and the base register */
- rbnode->block = blk;
rbnode->blklen = blklen;
rbnode->base_reg = base_reg;
rbnode->cache_present = present;
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c1894e93c378..719323bc6c7f 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -109,13 +109,37 @@ static const struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ size_t max_size = spi_max_transfer_size(spi);
+ struct regmap_bus *bus;
+
+ if (max_size != SIZE_MAX) {
+ bus = kmemdup(&regmap_spi, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ bus->free_on_exit = true;
+ bus->max_raw_read = max_size;
+ bus->max_raw_write = max_size;
+ return bus;
+ }
+
+ return &regmap_spi;
+}
+
struct regmap *__regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spi);
@@ -124,8 +148,12 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
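The effect of the regmap-spi change is that raw accesses are transparently capped at the controller's transfer limit. A sketch of a driver that benefits without any code of its own; the device and register layout below are made up:

#include <linux/regmap.h>
#include <linux/spi/spi.h>

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
};

static int demo_spi_probe(struct spi_device *spi)
{
	struct regmap *map;
	u8 buf[128];

	map = devm_regmap_init_spi(spi, &demo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * If the controller reports a finite spi_max_transfer_size(),
	 * regmap now knows the limit and splits this read accordingly.
	 */
	return regmap_raw_read(map, 0x00, buf, sizeof(buf));
}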
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 43c0940643f5..8f2b641d0b8c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -48,6 +48,9 @@ static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+define_id_show_func(cluster_id);
+static DEVICE_ATTR_RO(cluster_id);
+
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -63,6 +66,10 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+define_siblings_read_func(cluster_cpus, cluster_cpumask);
+static BIN_ATTR_RO(cluster_cpus, 0);
+static BIN_ATTR_RO(cluster_cpus_list, 0);
+
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
@@ -94,6 +101,8 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+ &bin_attr_cluster_cpus,
+ &bin_attr_cluster_cpus_list,
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
&bin_attr_package_cpus,
@@ -112,6 +121,7 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
&dev_attr_die_id.attr,
+ &dev_attr_cluster_id.attr,
&dev_attr_core_id.attr,
#ifdef CONFIG_SCHED_BOOK
&dev_attr_book_id.attr,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ab3e37aa1830..d97eaf6adb6d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -180,14 +180,6 @@ config BLK_DEV_LOOP
bits of, say, a sound file). This is also safe if the file resides
on a remote file server.
- There are several ways of encrypting disks. Some of these require
- kernel patches. The vanilla kernel offers the cryptoloop option
- and a Device Mapper target (which is superior, as it supports all
- file systems). If you want to use the cryptoloop, say Y to both
- LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
- or later) version of util-linux. Additionally, be aware that
- the cryptoloop is not safe for storing journaled filesystems.
-
Note that this loop device has nothing to do with the loopback
device used for network connections from the machine to itself.
@@ -211,21 +203,6 @@ config BLK_DEV_LOOP_MIN_COUNT
is used, it can be set to 0, since needed loop devices can be
dynamically allocated with the /dev/loop-control interface.
-config BLK_DEV_CRYPTOLOOP
- tristate "Cryptoloop Support (DEPRECATED)"
- select CRYPTO
- select CRYPTO_CBC
- depends on BLK_DEV_LOOP
- help
- Say Y here if you want to be able to use the ciphers that are
- provided by the CryptoAPI as loop transformation. This might be
- used as hard disk encryption.
-
- WARNING: This device is not safe for journaled file systems like
- ext3 or Reiserfs. Please use the Device Mapper crypto module
- instead, which can be configured to be on-disk compatible with the
- cryptoloop device. cryptoloop support will be removed in Linux 5.16.
-
source "drivers/block/drbd/Kconfig"
config BLK_DEV_NBD
@@ -304,8 +281,8 @@ config BLK_DEV_RAM_SIZE
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ depends on SCSI
select CDROM
- select SCSI_COMMON
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index bc68817ef496..11a74f17c9ad 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
-obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8b1714021498..bf5c124c5452 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -61,10 +61,10 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/blk-mq.h>
-#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -1780,6 +1780,7 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
+ int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
@@ -1798,8 +1799,10 @@ static int fd_alloc_disk(int drive, int system)
set_capacity(disk, 880 * 2);
unit[drive].gendisk[system] = disk;
- add_disk(disk);
- return 0;
+ err = add_disk(disk);
+ if (err)
+ blk_cleanup_disk(disk);
+ return err;
}
static int fd_alloc_drive(int drive)
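Several hunks in this series follow the same conversion seen in amiflop above: add_disk() now returns an error that the caller must check, unwinding with blk_cleanup_disk() on failure instead of ignoring the result. A minimal sketch of the pattern, using a hypothetical helper rather than code from the patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* hypothetical helper illustrating the add_disk() error-handling pattern */
static int demo_register_disk(struct gendisk *disk)
{
        int err;

        err = add_disk(disk);
        if (err) {
                /* drop the disk and queue allocated by blk_mq_alloc_disk() */
                blk_cleanup_disk(disk);
                return err;
        }
        return 0;
}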
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 06b360f7123a..52484bcdedb9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -37,8 +37,7 @@ static ssize_t aoedisk_show_state(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE,
- "%s%s\n",
+ return sysfs_emit(page, "%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
@@ -52,8 +51,8 @@ static ssize_t aoedisk_show_mac(struct device *dev,
struct aoetgt *t = d->targets[0];
if (t == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
- return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
+ return sysfs_emit(page, "none\n");
+ return sysfs_emit(page, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
@@ -85,7 +84,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
ne = nd;
nd = nds;
if (*nd == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
+ return sysfs_emit(page, "none\n");
for (p = page; nd < ne; nd++)
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
@@ -99,7 +98,7 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+ return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
struct device_attribute *attr, char *page)
@@ -107,7 +106,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+ return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
@@ -417,7 +416,9 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- device_add_disk(NULL, gd, aoe_attr_groups);
+ err = device_add_disk(NULL, gd, aoe_attr_groups);
+ if (err)
+ goto out_disk_cleanup;
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
@@ -426,6 +427,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
return;
+out_disk_cleanup:
+ blk_cleanup_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
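The aoeblk hunk above converts sysfs show() callbacks from snprintf(page, PAGE_SIZE, ...) to sysfs_emit(), which already knows the buffer is a full page and validates it internally. A short sketch of the resulting shape, with a hypothetical attribute name:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        /* sysfs_emit() bounds the output to PAGE_SIZE on its own */
        return sysfs_emit(buf, "%s\n", "example");
}
static DEVICE_ATTR_RO(demo);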
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a093644ac39f..d14bdc3589b2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -68,6 +68,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
@@ -298,6 +299,7 @@ static struct atari_floppy_struct {
disk change detection) */
int flags; /* flags */
struct gendisk *disk[NUM_DISK_MINORS];
+ bool registered[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
@@ -456,10 +458,20 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
+ DPRINT(("fd_end_request_cur(), bytes %d of %d\n",
+ blk_rq_cur_bytes(fd_request),
+ blk_rq_bytes(fd_request)));
+
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
+ DPRINT(("calling __blk_mq_end_request()\n"));
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
+ } else {
+ /* requeue rest of request */
+ DPRINT(("calling blk_mq_requeue_request()\n"));
+ blk_mq_requeue_request(fd_request, true);
+ fd_request = NULL;
}
}
@@ -653,9 +665,6 @@ static inline void copy_buffer(void *from, void *to)
*p2++ = *p1++;
}
-
-
-
/* General Interrupt Handling */
static void (*FloppyIRQHandler)( int status ) = NULL;
@@ -700,12 +709,21 @@ static void fd_error( void )
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
+ finish_fdc();
+ return;
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
}
+ /* need to re-run request to recalibrate */
+ atari_disable_irq( IRQ_MFP_FDC );
+
+ setup_req_params( SelectedDrive );
+ do_fd_action( SelectedDrive );
+
+ atari_enable_irq( IRQ_MFP_FDC );
}
@@ -732,8 +750,10 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
type--;
if (type >= NUM_DISK_MINORS ||
- minor2disktype[type].drive_types > DriveType)
+ minor2disktype[type].drive_types > DriveType) {
+ finish_fdc();
return -EINVAL;
+ }
}
q = unit[drive].disk[type]->queue;
@@ -751,6 +771,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
+ finish_fdc();
ret = -EINVAL;
goto out;
}
@@ -791,6 +812,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
+ finish_fdc();
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
@@ -825,6 +847,7 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
return;
}
}
@@ -1229,6 +1252,7 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
}
return;
@@ -1350,7 +1374,7 @@ static void fd_times_out(struct timer_list *unused)
static void finish_fdc( void )
{
- if (!NeedSeek) {
+ if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) {
finish_fdc_done( 0 );
}
else {
@@ -1385,7 +1409,8 @@ static void finish_fdc_done( int dummy )
start_motor_off_timer();
local_irq_save(flags);
- stdma_release();
+ if (stdma_is_locked_by(floppy_irq))
+ stdma_release();
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1435,8 +1460,7 @@ static int floppy_revalidate(struct gendisk *disk)
unsigned int drive = p - unit;
if (test_bit(drive, &changed_floppies) ||
- test_bit(drive, &fake_change) ||
- p->disktype == 0) {
+ test_bit(drive, &fake_change) || !p->disktype) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
@@ -1475,15 +1499,6 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
-static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
- spin_lock_irq(&ataflop_lock);
- atari_disable_irq(IRQ_MFP_FDC);
- finish_fdc();
- atari_enable_irq(IRQ_MFP_FDC);
- spin_unlock_irq(&ataflop_lock);
-}
-
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1491,6 +1506,10 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
int drive = floppy - unit;
int type = floppy->type;
+ DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n",
+ drive, type, blk_rq_cur_sectors(bd->rq),
+ blk_rq_sectors(bd->rq), bd->last));
+
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
@@ -1511,6 +1530,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
@@ -1527,11 +1547,13 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
type = minor2disktype[type].index;
@@ -1550,8 +1572,6 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
setup_req_params( drive );
do_fd_action( drive );
- if (bd->last)
- finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
out:
@@ -1634,6 +1654,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
+ finish_fdc();
return -EINVAL;
}
@@ -1701,8 +1722,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
- setprm.head != 2)
+ setprm.head != 2) {
+ finish_fdc();
return -EINVAL;
+ }
UDT = dtp;
set_capacity(disk, UDT->blocks);
@@ -1962,7 +1985,6 @@ static const struct block_device_operations floppy_fops = {
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
- .commit_rqs = ataflop_commit_rqs,
};
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
@@ -2000,12 +2022,28 @@ static void ataflop_probe(dev_t dev)
return;
mutex_lock(&ataflop_probe_lock);
if (!unit[drive].disk[type]) {
- if (ataflop_alloc_disk(drive, type) == 0)
+ if (ataflop_alloc_disk(drive, type) == 0) {
add_disk(unit[drive].disk[type]);
+ unit[drive].registered[type] = true;
+ }
}
mutex_unlock(&ataflop_probe_lock);
}
+static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
+{
+ int type;
+
+ for (type = 0; type < NUM_DISK_MINORS; type++) {
+ if (!fs->disk[type])
+ continue;
+ if (fs->registered[type])
+ del_gendisk(fs->disk[type]);
+ blk_cleanup_disk(fs->disk[type]);
+ }
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int __init atari_floppy_init (void)
{
int i;
@@ -2064,7 +2102,10 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
- add_disk(unit[i].disk[0]);
+ ret = add_disk(unit[i].disk[0]);
+ if (ret)
+ goto err_out_dma;
+ unit[i].registered[0] = true;
}
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
@@ -2074,12 +2115,11 @@ static int __init atari_floppy_init (void)
return 0;
+err_out_dma:
+ atari_stram_free(DMABuffer);
err:
- while (--i >= 0) {
- blk_cleanup_queue(unit[i].disk[0]->queue);
- put_disk(unit[i].disk[0]);
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ while (--i >= 0)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_unlock:
@@ -2128,18 +2168,10 @@ __setup("floppy=", atari_floppy_setup);
static void __exit atari_floppy_exit(void)
{
- int i, type;
+ int i;
- for (i = 0; i < FD_MAX_UNITS; i++) {
- for (type = 0; type < NUM_DISK_MINORS; type++) {
- if (!unit[i].disk[type])
- continue;
- del_gendisk(unit[i].disk[type]);
- blk_cleanup_queue(unit[i].disk[type]->queue);
- put_disk(unit[i].disk[type]);
- }
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ for (i = 0; i < FD_MAX_UNITS; i++)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
del_timer_sync(&fd_timer);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 530b31240203..aa0472718dce 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -282,7 +282,7 @@ out:
return err;
}
-static blk_qc_t brd_submit_bio(struct bio *bio)
+static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
@@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
- if (err)
- goto io_error;
+ if (err) {
+ bio_io_error(bio);
+ return;
+ }
sector += len >> SECTOR_SHIFT;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
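brd (and n64cart further down) are adjusted for the block-layer change that makes ->submit_bio() return void: errors are reported solely through bio_io_error() and success through bio_endio(), with no blk_qc_t cookie. A minimal sketch of the new shape, with a hypothetical per-bio helper standing in for the driver's real work:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* stand-in for the driver's real per-bio processing */
static bool demo_do_io(struct bio *bio)
{
        return true;
}

static void demo_submit_bio(struct bio *bio)
{
        if (!demo_do_io(bio)) {
                bio_io_error(bio);      /* ends the bio with -EIO */
                return;
        }
        bio_endio(bio);                 /* nothing to return anymore */
}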
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
deleted file mode 100644
index f0a91faa43a8..000000000000
--- a/drivers/block/cryptoloop.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Linux loop encryption enabling module
-
- Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
- Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
-
- */
-
-#include <linux/module.h>
-
-#include <crypto/skcipher.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/uaccess.h>
-#include "loop.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
-MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
-
-#define LOOP_IV_SECTOR_BITS 9
-#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
-
-static int
-cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- int err = -EINVAL;
- int cipher_len;
- int mode_len;
- char cms[LO_NAME_SIZE]; /* cipher-mode string */
- char *mode;
- char *cmsp = cms; /* c-m string pointer */
- struct crypto_sync_skcipher *tfm;
-
- /* encryption breaks for non sector aligned offsets */
-
- if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
- goto out;
-
- strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
- cms[LO_NAME_SIZE - 1] = 0;
-
- cipher_len = strcspn(cmsp, "-");
-
- mode = cmsp + cipher_len;
- mode_len = 0;
- if (*mode) {
- mode++;
- mode_len = strcspn(mode, "-");
- }
-
- if (!mode_len) {
- mode = "cbc";
- mode_len = 3;
- }
-
- if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
- return -EINVAL;
-
- memmove(cms, mode, mode_len);
- cmsp = cms + mode_len;
- *cmsp++ = '(';
- memcpy(cmsp, info->lo_crypt_name, cipher_len);
- cmsp += cipher_len;
- *cmsp++ = ')';
- *cmsp = 0;
-
- tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
- if (err != 0)
- goto out_free_tfm;
-
- lo->key_data = tfm;
- return 0;
-
- out_free_tfm:
- crypto_free_sync_skcipher(tfm);
-
- out:
- return err;
-}
-
-
-typedef int (*encdec_cbc_t)(struct skcipher_request *req);
-
-static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg_out;
- struct scatterlist sg_in;
-
- encdec_cbc_t encdecfunc;
- struct page *in_page, *out_page;
- unsigned in_offs, out_offs;
- int err;
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
-
- sg_init_table(&sg_out, 1);
- sg_init_table(&sg_in, 1);
-
- if (cmd == READ) {
- in_page = raw_page;
- in_offs = raw_off;
- out_page = loop_page;
- out_offs = loop_off;
- encdecfunc = crypto_skcipher_decrypt;
- } else {
- in_page = loop_page;
- in_offs = loop_off;
- out_page = raw_page;
- out_offs = raw_off;
- encdecfunc = crypto_skcipher_encrypt;
- }
-
- while (size > 0) {
- const int sz = min(size, LOOP_IV_SECTOR_SIZE);
- u32 iv[4] = { 0, };
- iv[0] = cpu_to_le32(IV & 0xffffffff);
-
- sg_set_page(&sg_in, in_page, sz, in_offs);
- sg_set_page(&sg_out, out_page, sz, out_offs);
-
- skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
- err = encdecfunc(req);
- if (err)
- goto out;
-
- IV++;
- size -= sz;
- in_offs += sz;
- out_offs += sz;
- }
-
- err = 0;
-
-out:
- skcipher_request_zero(req);
- return err;
-}
-
-static int
-cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
-{
- return -EINVAL;
-}
-
-static int
-cryptoloop_release(struct loop_device *lo)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- if (tfm != NULL) {
- crypto_free_sync_skcipher(tfm);
- lo->key_data = NULL;
- return 0;
- }
- printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
- return -EINVAL;
-}
-
-static struct loop_func_table cryptoloop_funcs = {
- .number = LO_CRYPT_CRYPTOAPI,
- .init = cryptoloop_init,
- .ioctl = cryptoloop_ioctl,
- .transfer = cryptoloop_transfer,
- .release = cryptoloop_release,
- .owner = THIS_MODULE
-};
-
-static int __init
-init_cryptoloop(void)
-{
- int rc = loop_register_transfer(&cryptoloop_funcs);
-
- if (rc)
- printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
- else
- pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
- return rc;
-}
-
-static void __exit
-cleanup_cryptoloop(void)
-{
- if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
- printk(KERN_ERR
- "cryptoloop: loop_unregister_transfer failed\n");
-}
-
-module_init(init_cryptoloop);
-module_exit(cleanup_cryptoloop);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 5d9181382ce1..f27d5b0f9a0b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
-extern blk_qc_t drbd_submit_bio(struct bio *bio);
+void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1826,8 +1826,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
- /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
- return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
+ return bdev ? bdev_nr_sectors(bdev) : 0;
}
/**
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55234a558e98..19db80a1e409 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2794,7 +2794,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
goto out_idr_remove_vol;
}
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@@ -2808,6 +2810,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5ca233644d70..3235532ae077 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws)
}
}
-blk_qc_t drbd_submit_bio(struct bio *bio)
+void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
@@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio)
inc_ap_bio(device);
__drbd_make_request(device, bio);
- return BLK_QC_T_NONE;
}
static bool net_timeout_reached(struct drbd_request *net_req,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fef79ea52e3e..3873e789478e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -184,6 +184,7 @@ static int print_unex = 1;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
@@ -4478,6 +4479,7 @@ static const struct blk_mq_ops floppy_mq_ops = {
};
static struct platform_device floppy_device[N_DRIVE];
+static bool registered[N_DRIVE];
static bool floppy_available(int drive)
{
@@ -4693,8 +4695,12 @@ static int __init do_floppy_init(void)
if (err)
goto out_remove_drives;
- device_add_disk(&floppy_device[drive].dev, disks[drive][0],
- NULL);
+ registered[drive] = true;
+
+ err = device_add_disk(&floppy_device[drive].dev,
+ disks[drive][0], NULL);
+ if (err)
+ goto out_remove_drives;
}
return 0;
@@ -4703,7 +4709,8 @@ out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive][0]);
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
@@ -4946,30 +4953,14 @@ static void __exit floppy_module_exit(void)
if (disks[drive][i])
del_gendisk(disks[drive][i]);
}
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
- blk_cleanup_queue(disks[drive][i]->queue);
+ blk_cleanup_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
-
- /*
- * These disks have not called add_disk(). Don't put down
- * queue reference in put_disk().
- */
- if (!(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE) {
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- disks[drive][i]->queue = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- put_disk(disks[drive][i]);
- }
}
cancel_delayed_work_sync(&fd_timeout);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7bf4686af774..3c09a33fa1c7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -133,58 +133,6 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static int transfer_xor(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block)
-{
- char *raw_buf = kmap_atomic(raw_page) + raw_off;
- char *loop_buf = kmap_atomic(loop_page) + loop_off;
- char *in, *out, *key;
- int i, keysize;
-
- if (cmd == READ) {
- in = raw_buf;
- out = loop_buf;
- } else {
- in = loop_buf;
- out = raw_buf;
- }
-
- key = lo->lo_encrypt_key;
- keysize = lo->lo_encrypt_key_size;
- for (i = 0; i < size; i++)
- *out++ = *in++ ^ key[(i & 511) % keysize];
-
- kunmap_atomic(loop_buf);
- kunmap_atomic(raw_buf);
- cond_resched();
- return 0;
-}
-
-static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- if (unlikely(info->lo_encrypt_key_size <= 0))
- return -EINVAL;
- return 0;
-}
-
-static struct loop_func_table none_funcs = {
- .number = LO_CRYPT_NONE,
-};
-
-static struct loop_func_table xor_funcs = {
- .number = LO_CRYPT_XOR,
- .transfer = transfer_xor,
- .init = xor_init
-};
-
-/* xfer_funcs[0] is special - its release function is never called */
-static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
- &none_funcs,
- &xor_funcs
-};
-
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
loff_t loopsize;
@@ -228,8 +176,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
/*
* We support direct I/O only if lo_offset is aligned with the
* logical I/O size of backing device, and the logical block
- * size of loop is bigger than the backing device's and the loop
- * needn't transform transfer.
+ * size of loop is bigger than the backing device's.
*
* TODO: the above condition may be loosed in the future, and
* direct I/O may be switched runtime at that time because most
@@ -238,8 +185,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
if (dio) {
if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
!(lo->lo_offset & dio_align) &&
- mapping->a_ops->direct_IO &&
- !lo->transfer)
+ mapping->a_ops->direct_IO)
use_dio = true;
else
use_dio = false;
@@ -273,19 +219,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
/**
- * loop_validate_block_size() - validates the passed in block size
- * @bsize: size to validate
- */
-static int
-loop_validate_block_size(unsigned short bsize)
-{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
- return -EINVAL;
-
- return 0;
-}
-
-/**
* loop_set_size() - sets device size and notifies userspace
* @lo: struct loop_device to set the size for
* @size: new size of the loop device
@@ -299,24 +232,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static inline int
-lo_do_transfer(struct loop_device *lo, int cmd,
- struct page *rpage, unsigned roffs,
- struct page *lpage, unsigned loffs,
- int size, sector_t rblock)
-{
- int ret;
-
- ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
- if (likely(!ret))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Transfer error at byte offset %llu, length %i.\n",
- (unsigned long long)rblock << 9, size);
- return ret;
-}
-
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
@@ -356,41 +271,6 @@ static int lo_write_simple(struct loop_device *lo, struct request *rq,
return ret;
}
-/*
- * This is the slow, transforming version that needs to double buffer the
- * data as it cannot do the transformations in place without having direct
- * access to the destination pages of the backing file.
- */
-static int lo_write_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct page *page;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
- bvec.bv_offset, bvec.bv_len, pos >> 9);
- if (unlikely(ret))
- break;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
- ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
- if (ret < 0)
- break;
- }
-
- __free_page(page);
- return ret;
-}
-
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
@@ -420,64 +300,12 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
-static int lo_read_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct iov_iter i;
- struct page *page;
- ssize_t len;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- loff_t offset = pos;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
-
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0) {
- ret = len;
- goto out_free_page;
- }
-
- ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
- bvec.bv_offset, len, offset >> 9);
- if (ret)
- goto out_free_page;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- }
-
- ret = 0;
-out_free_page:
- __free_page(page);
- return ret;
-}
-
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use fallocate to manipulate the space mappings used by the image
- * a.k.a. discard/zerorange. However we do not support this if
- * encryption is enabled, because it may give an attacker useful
- * information.
+ * a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
@@ -554,7 +382,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
blk_mq_complete_request(rq);
}
-static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
@@ -627,7 +455,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
- cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+ lo_rw_aio_complete(&cmd->iocb, ret);
return 0;
}
@@ -660,16 +488,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (lo->transfer)
- return lo_write_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, WRITE);
else
return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
- if (lo->transfer)
- return lo_read_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, READ);
else
return lo_read_simple(lo, rq, pos);
@@ -934,7 +758,7 @@ static void loop_config_discard(struct loop_device *lo)
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
- if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ if (S_ISBLK(inode->i_mode)) {
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
@@ -943,11 +767,9 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * image a.k.a. discard.
*/
- } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate) {
max_discard_sectors = 0;
granularity = 0;
@@ -1084,43 +906,6 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
-static int
-loop_release_xfer(struct loop_device *lo)
-{
- int err = 0;
- struct loop_func_table *xfer = lo->lo_encryption;
-
- if (xfer) {
- if (xfer->release)
- err = xfer->release(lo);
- lo->transfer = NULL;
- lo->lo_encryption = NULL;
- module_put(xfer->owner);
- }
- return err;
-}
-
-static int
-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
- const struct loop_info64 *i)
-{
- int err = 0;
-
- if (xfer) {
- struct module *owner = xfer->owner;
-
- if (!try_module_get(owner))
- return -EINVAL;
- if (xfer->init)
- err = xfer->init(lo, i);
- if (err)
- module_put(owner);
- else
- lo->lo_encryption = xfer;
- }
- return err;
-}
-
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@@ -1133,55 +918,27 @@ static int
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
- int err;
- struct loop_func_table *xfer;
- kuid_t uid = current_uid();
-
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
- err = loop_release_xfer(lo);
- if (err)
- return err;
-
- if (info->lo_encrypt_type) {
- unsigned int type = info->lo_encrypt_type;
-
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
- xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
- } else
- xfer = NULL;
-
- err = loop_init_xfer(lo, xfer, info);
- if (err)
- return err;
+ switch (info->lo_encrypt_type) {
+ case LO_CRYPT_NONE:
+ break;
+ case LO_CRYPT_XOR:
+ pr_warn("support for the xor transformation has been removed.\n");
+ return -EINVAL;
+ case LO_CRYPT_CRYPTOAPI:
+ pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
-
- if (!xfer)
- xfer = &none_funcs;
- lo->transfer = xfer->transfer;
- lo->ioctl = xfer->ioctl;
-
lo->lo_flags = info->lo_flags;
-
- lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
- lo->lo_init[0] = info->lo_init[0];
- lo->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_key_size) {
- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
- lo->lo_key_owner = uid;
- }
-
return 0;
}
@@ -1236,7 +993,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
if (config->block_size) {
- error = loop_validate_block_size(config->block_size);
+ error = blk_validate_block_size(config->block_size);
if (error)
goto out_unlock;
}
@@ -1329,7 +1086,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp = NULL;
gfp_t gfp = lo->old_gfp_mask;
- struct block_device *bdev = lo->lo_device;
int err = 0;
bool partscan = false;
int lo_number;
@@ -1381,36 +1137,23 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
- loop_release_xfer(lo);
- lo->transfer = NULL;
- lo->ioctl = NULL;
lo->lo_device = NULL;
- lo->lo_encryption = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
- lo->lo_encrypt_key_size = 0;
- memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
- memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
blk_queue_logical_block_size(lo->lo_queue, 512);
blk_queue_physical_block_size(lo->lo_queue, 512);
blk_queue_io_min(lo->lo_queue, 512);
- if (bdev) {
- invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
- }
- set_capacity(lo->lo_disk, 0);
+ invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
- if (bdev) {
- /* let user-space know about this change */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- }
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
out_unlock:
@@ -1498,7 +1241,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- kuid_t uid = current_uid();
int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@@ -1506,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
- if (lo->lo_encrypt_key_size &&
- !uid_eq(lo->lo_key_owner, uid) &&
- !capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto out_unlock;
- }
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1597,14 +1333,6 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_sizelimit = lo->lo_sizelimit;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
- info->lo_encrypt_type =
- lo->lo_encryption ? lo->lo_encryption->number : 0;
- if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
- info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
- memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
- lo->lo_encrypt_key_size);
- }
/* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
@@ -1630,16 +1358,8 @@ loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
info64->lo_rdevice = info->lo_rdevice;
info64->lo_offset = info->lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info->lo_encrypt_type;
- info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
info64->lo_flags = info->lo_flags;
- info64->lo_init[0] = info->lo_init[0];
- info64->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}
static int
@@ -1651,16 +1371,8 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
info->lo_inode = info64->lo_inode;
info->lo_rdevice = info64->lo_rdevice;
info->lo_offset = info64->lo_offset;
- info->lo_encrypt_type = info64->lo_encrypt_type;
- info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
info->lo_flags = info64->lo_flags;
- info->lo_init[0] = info64->lo_init[0];
- info->lo_init[1] = info64->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info->lo_device != info64->lo_device ||
@@ -1759,7 +1471,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- err = loop_validate_block_size(arg);
+ err = blk_validate_block_size(arg);
if (err)
return err;
@@ -1809,7 +1521,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
err = loop_set_block_size(lo, arg);
break;
default:
- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ err = -EINVAL;
}
mutex_unlock(&lo->lo_mutex);
return err;
@@ -1885,7 +1597,6 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
- compat_int_t lo_encrypt_type;
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
@@ -1914,16 +1625,8 @@ loop_info64_from_compat(const struct compat_loop_info __user *arg,
info64->lo_rdevice = info.lo_rdevice;
info64->lo_offset = info.lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info.lo_encrypt_type;
- info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
info64->lo_flags = info.lo_flags;
- info64->lo_init[0] = info.lo_init[0];
- info64->lo_init[1] = info.lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
return 0;
}
@@ -1943,24 +1646,14 @@ loop_info64_to_compat(const struct loop_info64 *info64,
info.lo_inode = info64->lo_inode;
info.lo_rdevice = info64->lo_rdevice;
info.lo_offset = info64->lo_offset;
- info.lo_encrypt_type = info64->lo_encrypt_type;
- info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
info.lo_flags = info64->lo_flags;
- info.lo_init[0] = info64->lo_init[0];
- info.lo_init[1] = info64->lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info.lo_device != info64->lo_device ||
info.lo_rdevice != info64->lo_rdevice ||
info.lo_inode != info64->lo_inode ||
- info.lo_offset != info64->lo_offset ||
- info.lo_init[0] != info64->lo_init[0] ||
- info.lo_init[1] != info64->lo_init[1])
+ info.lo_offset != info64->lo_offset)
return -EOVERFLOW;
if (copy_to_user(arg, &info, sizeof(info)))
@@ -2101,43 +1794,6 @@ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
-int loop_register_transfer(struct loop_func_table *funcs)
-{
- unsigned int n = funcs->number;
-
- if (n >= MAX_LO_CRYPT || xfer_funcs[n])
- return -EINVAL;
- xfer_funcs[n] = funcs;
- return 0;
-}
-
-int loop_unregister_transfer(int number)
-{
- unsigned int n = number;
- struct loop_func_table *xfer;
-
- if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
- return -EINVAL;
- /*
- * This function is called from only cleanup_cryptoloop().
- * Given that each loop device that has a transfer enabled holds a
- * reference to the module implementing it we should never get here
- * with a transfer that is set (unless forced module unloading is
- * requested). Thus, check module's refcount and warn if this is
- * not a clean unloading.
- */
-#ifdef CONFIG_MODULE_UNLOAD
- if (xfer->owner && module_refcount(xfer->owner) != -1)
- pr_err("Danger! Unregistering an in use transfer function.\n");
-#endif
-
- xfer_funcs[n] = NULL;
- return 0;
-}
-
-EXPORT_SYMBOL(loop_register_transfer);
-EXPORT_SYMBOL(loop_unregister_transfer);
-
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2394,13 +2050,19 @@ static int loop_add(int i)
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
/* Make this loop device reachable from pathname. */
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
+
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
+
return i;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
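With the cryptoloop and xor transfer paths gone, loop_set_status_from_info() above rejects any non-zero lo_encrypt_type. A hedged userspace sketch of the visible effect, assuming /dev/loop0 is already bound to a backing file; the ioctl is expected to fail with EINVAL, and dm-crypt is the supported replacement:

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(void)
{
        struct loop_info64 info;
        int fd = open("/dev/loop0", O_RDWR);    /* assumed: already bound */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&info, 0, sizeof(info));
        info.lo_encrypt_type = LO_CRYPT_CRYPTOAPI;
        if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
                perror("LOOP_SET_STATUS64");    /* EINVAL after this series */
        return 0;
}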
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 04c88dd6eabd..082d4b6bfc6a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -32,23 +32,10 @@ struct loop_device {
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
- int (*transfer)(struct loop_device *, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
char lo_file_name[LO_NAME_SIZE];
- char lo_crypt_name[LO_NAME_SIZE];
- char lo_encrypt_key[LO_KEY_SIZE];
- int lo_encrypt_key_size;
- struct loop_func_table *lo_encryption;
- __u32 lo_init[2];
- kuid_t lo_key_owner; /* Who set the key */
- int (*ioctl)(struct loop_device *, int cmd,
- unsigned long arg);
struct file * lo_backing_file;
struct block_device *lo_device;
- void *key_data;
gfp_t old_gfp_mask;
@@ -82,21 +69,4 @@ struct loop_cmd {
struct cgroup_subsys_state *memcg_css;
};
-/* Support for loadable transfer modules */
-struct loop_func_table {
- int number; /* filter type */
- int (*transfer)(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
- int (*init)(struct loop_device *, const struct loop_info64 *);
- /* release is called from loop_unregister_transfer or clr_fd */
- int (*release)(struct loop_device *);
- int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
- struct module *owner;
-};
-
-int loop_register_transfer(struct loop_func_table *funcs);
-int loop_unregister_transfer(int number);
-
#endif
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 901855717cb5..c91b9010c1a6 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3633,7 +3633,9 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ if (rv)
+ goto read_capacity_error;
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4061,7 +4063,6 @@ block_initialize_err:
msi_initialize_err:
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
@@ -4119,7 +4120,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
mtip_block_remove(dd);
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 26798da661bd..78282f01f581 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
return true;
}
-static blk_qc_t n64cart_submit_bio(struct bio *bio)
+static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio)
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {
- if (!n64cart_do_bvec(dev, &bvec, pos))
- goto io_error;
+ if (!n64cart_do_bvec(dev, &bvec, pos)) {
+ bio_io_error(bio);
+ return;
+ }
pos += bvec.bv_len;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static const struct block_device_operations n64cart_fops = {
@@ -117,6 +115,7 @@ static const struct block_device_operations n64cart_fops = {
static int __init n64cart_probe(struct platform_device *pdev)
{
struct gendisk *disk;
+ int err = -ENOMEM;
if (!start || !size) {
pr_err("start or size not specified\n");
@@ -134,7 +133,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- return -ENOMEM;
+ goto out;
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART_SCAN;
@@ -149,11 +148,18 @@ static int __init n64cart_probe(struct platform_device *pdev)
blk_queue_physical_block_size(disk->queue, 4096);
blk_queue_logical_block_size(disk->queue, 4096);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
pr_info("n64cart: %u kb disk\n", size / 1024);
return 0;
+
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out:
+ return err;
}
static struct platform_driver n64cart_driver = {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1183f7872b71..b47b2a87ae8f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -122,15 +122,21 @@ struct nbd_device {
struct work_struct remove_work;
struct list_head list;
- struct task_struct *task_recv;
struct task_struct *task_setup;
unsigned long flags;
+ pid_t pid; /* pid of nbd-client, if attached */
char *backend;
};
#define NBD_CMD_REQUEUED 1
+/*
+ * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
+ * cleared on completion. Both setting and clearing of the flag are protected
+ * by cmd->lock.
+ */
+#define NBD_CMD_INFLIGHT 2
struct nbd_cmd {
struct nbd_device *nbd;
@@ -217,7 +223,7 @@ static ssize_t pid_show(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
- return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
+ return sprintf(buf, "%d\n", nbd->pid);
}
static const struct device_attribute pid_attr = {
@@ -310,26 +316,19 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static void nbd_size_clear(struct nbd_device *nbd)
-{
- if (nbd->config->bytesize) {
- set_capacity(nbd->disk, 0);
- kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
- }
-}
-
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
- if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
+
+ if (blk_validate_block_size(blksize))
return -EINVAL;
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
- if (!nbd->task_recv)
+ if (!nbd->pid)
return 0;
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
@@ -405,6 +404,11 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_DONE;
+ }
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
mutex_unlock(&cmd->lock);
@@ -484,7 +488,8 @@ done:
}
/*
- * Send or receive packet.
+ * Send or receive packet. Return a positive value on success and
+ * negative value on failure, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
@@ -610,7 +615,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
trace_nbd_header_sent(req, handle);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
@@ -654,7 +659,7 @@ send_pages:
skip = 0;
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
@@ -688,38 +693,45 @@ out:
return 0;
}
-/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+static int nbd_read_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
{
- struct nbd_config *config = nbd->config;
- int result;
- struct nbd_reply reply;
- struct nbd_cmd *cmd;
- struct request *req = NULL;
- u64 handle;
- u16 hwq;
- u32 tag;
- struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+ struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
struct iov_iter to;
- int ret = 0;
+ int result;
- reply.magic = 0;
- iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
+ reply->magic = 0;
+ iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
- if (!nbd_disconnected(config))
+ if (result < 0) {
+ if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
- return ERR_PTR(result);
+ return result;
}
- if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
+ if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
- (unsigned long)ntohl(reply.magic));
- return ERR_PTR(-EPROTO);
+ (unsigned long)ntohl(reply->magic));
+ return -EPROTO;
}
- memcpy(&handle, reply.handle, sizeof(handle));
+ return 0;
+}
+
+/* NULL returned = something went wrong, inform userspace */
+static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
+{
+ int result;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
+ u64 handle;
+ u16 hwq;
+ u32 tag;
+ int ret = 0;
+
+ memcpy(&handle, reply->handle, sizeof(handle));
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -734,6 +746,16 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
+ tag, cmd->status, cmd->flags);
+ ret = -ENOENT;
+ goto out;
+ }
+ if (cmd->index != index) {
+ dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
+ tag, index, cmd->index);
+ }
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
@@ -752,9 +774,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
ret = -ENOENT;
goto out;
}
- if (ntohl(reply.error)) {
+ if (ntohl(reply->error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
- ntohl(reply.error));
+ ntohl(reply->error));
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -763,11 +785,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
+ struct iov_iter to;
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
+ if (result < 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
@@ -776,7 +799,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
- if (nbd_disconnected(config)) {
+ if (nbd_disconnected(nbd->config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -800,24 +823,46 @@ static void recv_work(struct work_struct *work)
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
+ struct request_queue *q = nbd->disk->queue;
+ struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
- cmd = nbd_read_stat(nbd, args->index);
- if (IS_ERR(cmd)) {
- struct nbd_sock *nsock = config->socks[args->index];
+ struct nbd_reply reply;
- mutex_lock(&nsock->tx_lock);
- nbd_mark_nsock_dead(nbd, nsock, 1);
- mutex_unlock(&nsock->tx_lock);
+ if (nbd_read_reply(nbd, args->index, &reply))
+ break;
+
+ /*
+ * Grab .q_usage_counter so the request pool won't go away, and no
+ * request use-after-free is possible during nbd_handle_reply().
+ * If the queue is frozen, there won't be any inflight requests, so
+ * we needn't handle the incoming garbage message.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter)) {
+ dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
+ __func__);
+ break;
+ }
+
+ cmd = nbd_handle_reply(nbd, args->index, &reply);
+ if (IS_ERR(cmd)) {
+ percpu_ref_put(&q->q_usage_counter);
break;
}
rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
+ percpu_ref_put(&q->q_usage_counter);
}
+
+ nsock = config->socks[args->index];
+ mutex_lock(&nsock->tx_lock);
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@@ -833,6 +878,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
return true;
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return true;
+ }
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@@ -914,7 +963,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@@ -923,7 +971,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@@ -947,7 +994,6 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EIO;
}
goto again;
@@ -969,7 +1015,13 @@ again:
* returns EAGAIN can be retried on a different socket.
*/
ret = nbd_send_cmd(nbd, cmd, index);
- if (ret == -EAGAIN) {
+ /*
+ * Access to this flag is protected by cmd->lock, thus it's safe to set
+ * the flag after nbd_send_cmd() succeeds in sending the request to the server.
+ */
+ if (!ret)
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ else if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
@@ -1206,7 +1258,7 @@ static void send_disconnects(struct nbd_device *nbd)
iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
- if (ret <= 0)
+ if (ret < 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
@@ -1237,11 +1289,13 @@ static void nbd_config_put(struct nbd_device *nbd)
&nbd->config_lock)) {
struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd);
- nbd_size_clear(nbd);
+ invalidate_disk(nbd->disk);
+ if (nbd->config->bytesize)
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- nbd->task_recv = NULL;
+ nbd->pid = 0;
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
&config->runtime_flags)) {
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
@@ -1282,7 +1336,7 @@ static int nbd_start_device(struct nbd_device *nbd)
int num_connections = config->num_connections;
int error = 0, i;
- if (nbd->task_recv)
+ if (nbd->pid)
return -EBUSY;
if (!config->socks)
return -EINVAL;
@@ -1301,7 +1355,7 @@ static int nbd_start_device(struct nbd_device *nbd)
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
- nbd->task_recv = current;
+ nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@@ -1557,8 +1611,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
- if (nbd->task_recv)
- seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+ if (nbd->pid)
+ seq_printf(s, "recv: %d\n", nbd->pid);
return 0;
}
@@ -1762,7 +1816,9 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_err_disk;
/*
* Now publish the device.
@@ -1771,6 +1827,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
+out_err_disk:
+ blk_cleanup_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@@ -2135,7 +2193,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
- !nbd->task_recv) {
+ !nbd->pid) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
ret = -EINVAL;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 187d779c8ca0..323af5c9c802 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -92,6 +92,10 @@ static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+static int g_poll_queues = 1;
+module_param_named(poll_queues, g_poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
+
static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
@@ -324,29 +328,69 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-static int nullb_apply_submit_queues(struct nullb_device *dev,
- unsigned int submit_queues)
+static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ unsigned int submit_queues,
+ unsigned int poll_queues)
+
{
- struct nullb *nullb = dev->nullb;
struct blk_mq_tag_set *set;
+ int ret, nr_hw_queues;
- if (!nullb)
+ if (!dev->nullb)
return 0;
/*
+ * Make sure there is at least one submit queue and one poll queue.
+ */
+ if (!submit_queues || !poll_queues)
+ return -EINVAL;
+
+ /*
* Make sure that null_init_hctx() does not access nullb->queues[] past
* the end of that array.
*/
- if (submit_queues > nr_cpu_ids)
+ if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
return -EINVAL;
- set = nullb->tag_set;
- blk_mq_update_nr_hw_queues(set, submit_queues);
- return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+
+ /*
+ * Keep previous and new queue numbers in nullb_device for reference in
+ * the callback function null_map_queues().
+ */
+ dev->prev_submit_queues = dev->submit_queues;
+ dev->prev_poll_queues = dev->poll_queues;
+ dev->submit_queues = submit_queues;
+ dev->poll_queues = poll_queues;
+
+ set = dev->nullb->tag_set;
+ nr_hw_queues = submit_queues + poll_queues;
+ blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+ ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
+
+ if (ret) {
+ /* on error, revert the queue numbers */
+ dev->submit_queues = dev->prev_submit_queues;
+ dev->poll_queues = dev->prev_poll_queues;
+ }
+
+ return ret;
+}
+
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+}
+
+static int nullb_apply_poll_queues(struct nullb_device *dev,
+ unsigned int poll_queues)
+{
+ return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
@@ -466,6 +510,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_size,
&nullb_device_attr_completion_nsec,
&nullb_device_attr_submit_queues,
+ &nullb_device_attr_poll_queues,
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
@@ -593,6 +638,9 @@ static struct nullb_device *null_alloc_dev(void)
dev->size = g_gb * 1024;
dev->completion_nsec = g_completion_nsec;
dev->submit_queues = g_submit_queues;
+ dev->prev_submit_queues = g_submit_queues;
+ dev->poll_queues = g_poll_queues;
+ dev->prev_poll_queues = g_poll_queues;
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
@@ -1422,7 +1470,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}
-static blk_qc_t null_submit_bio(struct bio *bio)
+static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
@@ -1434,7 +1482,6 @@ static blk_qc_t null_submit_bio(struct bio *bio)
cmd->bio = bio;
null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
- return BLK_QC_T_NONE;
}
static bool should_timeout_request(struct request *rq)
@@ -1455,12 +1502,100 @@ static bool should_requeue_request(struct request *rq)
return false;
}
+static int null_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nullb *nullb = set->driver_data;
+ int i, qoff;
+ unsigned int submit_queues = g_submit_queues;
+ unsigned int poll_queues = g_poll_queues;
+
+ if (nullb) {
+ struct nullb_device *dev = nullb->dev;
+
+ /*
+ * Check nr_hw_queues of the tag set to see whether the expected
+ * number of hardware queues was prepared. If the block layer
+ * failed to prepare them, fall back to the previous numbers of
+ * submit and poll queues when mapping queues.
+ */
+ if (set->nr_hw_queues ==
+ dev->submit_queues + dev->poll_queues) {
+ submit_queues = dev->submit_queues;
+ poll_queues = dev->poll_queues;
+ } else if (set->nr_hw_queues ==
+ dev->prev_submit_queues + dev->prev_poll_queues) {
+ submit_queues = dev->prev_submit_queues;
+ poll_queues = dev->prev_poll_queues;
+ } else {
+ pr_warn("tag set has unexpected nr_hw_queues: %d\n",
+ set->nr_hw_queues);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ map->nr_queues = submit_queues;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ continue;
+ case HCTX_TYPE_POLL:
+ map->nr_queues = poll_queues;
+ break;
+ }
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+ blk_mq_map_queues(map);
+ }
+
+ return 0;
+}
+
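To make the mapping concrete, here is a worked example with hypothetical queue counts (not values taken from the patch):

/*
 * Worked example (hypothetical): submit_queues = 4, poll_queues = 2, so
 * the tag set was allocated with nr_hw_queues = 6 and null_map_queues()
 * ends up with:
 *
 *   HCTX_TYPE_DEFAULT: queue_offset = 0, nr_queues = 4   -> hctx 0..3
 *   HCTX_TYPE_READ:    nr_queues = 0                     (unused)
 *   HCTX_TYPE_POLL:    queue_offset = 4, nr_queues = 2   -> hctx 4..5
 *
 * blk_mq_map_queues() then spreads the online CPUs over each range, so
 * polled I/O is steered to the last two hardware contexts.
 */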
+static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nullb_queue *nq = hctx->driver_data;
+ LIST_HEAD(list);
+ int nr = 0;
+
+ spin_lock(&nq->poll_lock);
+ list_splice_init(&nq->poll_list, &list);
+ spin_unlock(&nq->poll_lock);
+
+ while (!list_empty(&list)) {
+ struct nullb_cmd *cmd;
+ struct request *req;
+
+ req = list_first_entry(&list, struct request, queuelist);
+ list_del_init(&req->queuelist);
+ cmd = blk_mq_rq_to_pdu(req);
+ cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
+ blk_rq_sectors(req));
+ end_cmd(cmd);
+ nr++;
+ }
+
+ return nr;
+}
+
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
pr_info("rq %p timed out\n", rq);
+ if (hctx->type == HCTX_TYPE_POLL) {
+ struct nullb_queue *nq = hctx->driver_data;
+
+ spin_lock(&nq->poll_lock);
+ list_del_init(&rq->queuelist);
+ spin_unlock(&nq->poll_lock);
+ }
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
@@ -1481,10 +1616,11 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
+ const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (nq->dev->irqmode == NULL_IRQ_TIMER) {
+ if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
@@ -1508,6 +1644,13 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
}
+
+ if (is_poll) {
+ spin_lock(&nq->poll_lock);
+ list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ spin_unlock(&nq->poll_lock);
+ return BLK_STS_OK;
+ }
if (cmd->fake_timeout)
return BLK_STS_OK;
@@ -1543,6 +1686,8 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
+ INIT_LIST_HEAD(&nq->poll_list);
+ spin_lock_init(&nq->poll_lock);
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
@@ -1568,6 +1713,8 @@ static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
+ .poll = null_poll,
+ .map_queues = null_map_queues,
.init_hctx = null_init_hctx,
.exit_hctx = null_exit_hctx,
};
@@ -1664,13 +1811,17 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
+ int nqueues = nr_cpu_ids;
+
+ if (g_poll_queues)
+ nqueues += g_poll_queues;
+
+ nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->queue_depth = nullb->dev->hw_queue_depth;
-
return 0;
}
@@ -1722,9 +1873,14 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ int poll_queues;
+
set->ops = &null_mq_ops;
set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
g_submit_queues;
+ poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
+ if (poll_queues)
+ set->nr_hw_queues += poll_queues;
set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
g_hw_queue_depth;
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
@@ -1734,7 +1890,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags |= BLK_MQ_F_NO_SCHED;
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- set->driver_data = NULL;
+ set->driver_data = nullb;
+ if (g_poll_queues)
+ set->nr_maps = 3;
+ else
+ set->nr_maps = 1;
if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
@@ -1754,6 +1914,13 @@ static int null_validate_conf(struct nullb_device *dev)
dev->submit_queues = nr_cpu_ids;
else if (dev->submit_queues == 0)
dev->submit_queues = 1;
+ dev->prev_submit_queues = dev->submit_queues;
+
+ if (dev->poll_queues > g_poll_queues)
+ dev->poll_queues = g_poll_queues;
+ else if (dev->poll_queues == 0)
+ dev->poll_queues = 1;
+ dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 64bef125d1df..78eb56b0ca55 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -32,6 +32,9 @@ struct nullb_queue {
struct nullb_device *dev;
unsigned int requeue_selection;
+ struct list_head poll_list;
+ spinlock_t poll_lock;
+
struct nullb_cmd *cmds;
};
@@ -83,6 +86,9 @@ struct nullb_device {
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
+ unsigned int prev_submit_queues; /* number of submission queues before change */
+ unsigned int poll_queues; /* number of IOPOLL submission queues */
+ unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f9cdd11f02f5..f6b1d63e96e1 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,8 +183,6 @@ static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
static int pcd_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc);
-static int pcd_detect(void);
-static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
@@ -302,53 +300,6 @@ static const struct blk_mq_ops pcd_mq_ops = {
.queue_rq = pcd_queue_rq,
};
-static void pcd_init_units(void)
-{
- struct pcd_unit *cd;
- int unit;
-
- pcd_drive_count = 0;
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&cd->tag_set, cd);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&cd->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&cd->rq_list);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- cd->disk = disk;
- cd->pi = &cd->pia;
- cd->present = 0;
- cd->last_sense = 0;
- cd->changed = 1;
- cd->drive = (*drives[unit])[D_SLV];
- if ((*drives[unit])[D_PRT])
- pcd_drive_count++;
-
- cd->name = &cd->info.name[0];
- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
- cd->info.ops = &pcd_dops;
- cd->info.handle = cd;
- cd->info.speed = 0;
- cd->info.capacity = 1;
- cd->info.mask = 0;
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, cd->name); /* umm... */
- disk->fops = &pcd_bdops;
- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- }
-}
-
static int pcd_open(struct cdrom_device_info *cdi, int purpose)
{
struct pcd_unit *cd = cdi->handle;
@@ -630,10 +581,11 @@ static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DISC_OK;
}
-static int pcd_identify(struct pcd_unit *cd, char *id)
+static int pcd_identify(struct pcd_unit *cd)
{
- int k, s;
char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+ char id[18];
+ int k, s;
pcd_bufblk = -1;
@@ -661,108 +613,47 @@ static int pcd_identify(struct pcd_unit *cd, char *id)
}
/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
+ * returns 0 if a drive is detected, otherwise an error code.
*/
-static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
+static int pcd_probe(struct pcd_unit *cd, int ms)
{
if (ms == -1) {
for (cd->drive = 0; cd->drive <= 1; cd->drive++)
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
} else {
cd->drive = ms;
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
}
- return -1;
+ return -ENODEV;
}
-static void pcd_probe_capabilities(void)
+static int pcd_probe_capabilities(struct pcd_unit *cd)
{
- int unit, r;
- char buffer[32];
char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
- struct pcd_unit *cd;
-
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->present)
- continue;
- r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
- if (r)
- continue;
- /* we should now have the cap page */
- if ((buffer[11] & 1) == 0)
- cd->info.mask |= CDC_CD_R;
- if ((buffer[11] & 2) == 0)
- cd->info.mask |= CDC_CD_RW;
- if ((buffer[12] & 1) == 0)
- cd->info.mask |= CDC_PLAY_AUDIO;
- if ((buffer[14] & 1) == 0)
- cd->info.mask |= CDC_LOCK;
- if ((buffer[14] & 8) == 0)
- cd->info.mask |= CDC_OPEN_TRAY;
- if ((buffer[14] >> 6) == 0)
- cd->info.mask |= CDC_CLOSE_TRAY;
- }
-}
-
-static int pcd_detect(void)
-{
- char id[18];
- int k, unit;
- struct pcd_unit *cd;
+ char buffer[32];
+ int ret;
- printk("%s: %s version %s, major %d, nice %d\n",
- name, name, PCD_VERSION, major, nice);
+ ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
+ if (ret)
+ return ret;
+
+ /* we should now have the cap page */
+ if ((buffer[11] & 1) == 0)
+ cd->info.mask |= CDC_CD_R;
+ if ((buffer[11] & 2) == 0)
+ cd->info.mask |= CDC_CD_RW;
+ if ((buffer[12] & 1) == 0)
+ cd->info.mask |= CDC_PLAY_AUDIO;
+ if ((buffer[14] & 1) == 0)
+ cd->info.mask |= CDC_LOCK;
+ if ((buffer[14] & 8) == 0)
+ cd->info.mask |= CDC_OPEN_TRAY;
+ if ((buffer[14] >> 6) == 0)
+ cd->info.mask |= CDC_CLOSE_TRAY;
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- k = 0;
- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- cd = pcd;
- if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
- pcd_buffer, PI_PCD, verbose, cd->name)) {
- if (!pcd_probe(cd, -1, id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- } else {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (!cd->disk)
- continue;
- if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pcd_buffer, PI_PCD, verbose, cd->name))
- continue;
- if (!pcd_probe(cd, conf[D_SLV], id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No CD-ROM drive found\n", name);
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
- blk_cleanup_disk(cd->disk);
- blk_mq_free_tag_set(&cd->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return 0;
}
/* I/O request processing */
@@ -999,43 +890,130 @@ static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
return 0;
}
+static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&cd->tag_set, cd);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+
+ INIT_LIST_HEAD(&cd->rq_list);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ cd->disk = disk;
+ cd->pi = &cd->pia;
+ cd->present = 0;
+ cd->last_sense = 0;
+ cd->changed = 1;
+ cd->drive = (*drives[cd - pcd])[D_SLV];
+
+ cd->name = &cd->info.name[0];
+ snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
+ cd->info.ops = &pcd_dops;
+ cd->info.handle = cd;
+ cd->info.speed = 0;
+ cd->info.capacity = 1;
+ cd->info.mask = 0;
+ disk->major = major;
+ disk->first_minor = unit;
+ disk->minors = 1;
+ strcpy(disk->disk_name, cd->name); /* umm... */
+ disk->fops = &pcd_bdops;
+ disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+
+ if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
+ pcd_buffer, PI_PCD, verbose, cd->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pcd_probe(cd, ms);
+ if (ret)
+ goto out_pi_release;
+
+ cd->present = 1;
+ pcd_probe_capabilities(cd);
+ ret = register_cdrom(cd->disk, &cd->info);
+ if (ret)
+ goto out_pi_release;
+ ret = add_disk(cd->disk);
+ if (ret)
+ goto out_unreg_cdrom;
+ return 0;
+
+out_unreg_cdrom:
+ unregister_cdrom(&cd->info);
+out_pi_release:
+ pi_release(cd->pi);
+out_free_disk:
+ blk_cleanup_disk(cd->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&cd->tag_set);
+ return ret;
+}
+
static int __init pcd_init(void)
{
- struct pcd_unit *cd;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pcd_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pcd_detect())
- return -ENODEV;
+ pr_info("%s: %s version %s, major %d, nice %d\n",
+ name, name, PCD_VERSION, major, nice);
- /* get the atapi capabilities page */
- pcd_probe_capabilities();
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
+ }
- if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ if ((*drives[unit])[D_PRT])
+ pcd_drive_count++;
+ }
+
+ if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
+ found++;
+ } else {
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ struct pcd_unit *cd = &pcd[unit];
+ int *conf = *drives[unit];
- blk_cleanup_queue(cd->disk->queue);
- blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
+ if (!conf[D_PRT])
+ continue;
+ if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ conf[D_SLV]))
+ found++;
}
- return -EBUSY;
}
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (cd->present) {
- register_cdrom(cd->disk, &cd->info);
- cd->disk->private_data = cd;
- add_disk(cd->disk);
- }
+ if (!found) {
+ pr_info("%s: No CD-ROM drive found\n", name);
+ goto out_unregister_pi_driver;
}
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pcd_exit(void)
@@ -1044,20 +1022,18 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
+ if (!cd->present)
continue;
- if (cd->present) {
- del_gendisk(cd->disk);
- pi_release(cd->pi);
- unregister_cdrom(&cd->info);
- }
- blk_cleanup_queue(cd->disk->queue);
+ unregister_cdrom(&cd->info);
+ del_gendisk(cd->disk);
+ pi_release(cd->pi);
+ blk_cleanup_disk(cd->disk);
+
blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
}
- unregister_blkdev(major, name);
pi_unregister_driver(par_drv);
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 675327df6aff..fba865058a17 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -775,14 +775,14 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
struct pd_req *req;
- rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
+ rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = blk_mq_rq_to_pdu(rq);
req->func = func;
blk_execute_rq(disk->gd, rq, 0);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return 0;
}
@@ -875,9 +875,27 @@ static const struct blk_mq_ops pd_mq_ops = {
.queue_rq = pd_queue_rq,
};
-static void pd_probe_drive(struct pd_unit *disk)
+static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
+ int mode, int unit, int protocol, int delay)
{
+ int index = disk - pd;
+ int *parm = *drives[index];
struct gendisk *p;
+ int ret;
+
+ disk->pi = &disk->pia;
+ disk->access = 0;
+ disk->changed = 1;
+ disk->capacity = 0;
+ disk->drive = parm[D_SLV];
+ snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
+ disk->alt_geom = parm[D_GEO];
+ disk->standby = parm[D_SBY];
+ INIT_LIST_HEAD(&disk->rq_list);
+
+ if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
+ pd_scratch, PI_PD, verbose, disk->name))
+ return -ENXIO;
memset(&disk->tag_set, 0, sizeof(disk->tag_set));
disk->tag_set.ops = &pd_mq_ops;
@@ -887,14 +905,14 @@ static void pd_probe_drive(struct pd_unit *disk)
disk->tag_set.queue_depth = 2;
disk->tag_set.numa_node = NUMA_NO_NODE;
disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-
- if (blk_mq_alloc_tag_set(&disk->tag_set))
- return;
+ ret = blk_mq_alloc_tag_set(&disk->tag_set);
+ if (ret)
+ goto pi_release;
p = blk_mq_alloc_disk(&disk->tag_set, disk);
if (IS_ERR(p)) {
- blk_mq_free_tag_set(&disk->tag_set);
- return;
+ ret = PTR_ERR(p);
+ goto free_tag_set;
}
disk->gd = p;
@@ -905,102 +923,88 @@ static void pd_probe_drive(struct pd_unit *disk)
p->minors = 1 << PD_BITS;
p->events = DISK_EVENT_MEDIA_CHANGE;
p->private_data = disk;
-
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
if (disk->drive == -1) {
- for (disk->drive = 0; disk->drive <= 1; disk->drive++)
- if (pd_special_command(disk, pd_identify) == 0)
- return;
- } else if (pd_special_command(disk, pd_identify) == 0)
- return;
- disk->gd = NULL;
+ for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
+ ret = pd_special_command(disk, pd_identify);
+ if (ret == 0)
+ break;
+ }
+ } else {
+ ret = pd_special_command(disk, pd_identify);
+ }
+ if (ret)
+ goto put_disk;
+ set_capacity(disk->gd, disk->capacity);
+ ret = add_disk(disk->gd);
+ if (ret)
+ goto cleanup_disk;
+ return 0;
+cleanup_disk:
+ blk_cleanup_disk(disk->gd);
+put_disk:
put_disk(p);
+ disk->gd = NULL;
+free_tag_set:
+ blk_mq_free_tag_set(&disk->tag_set);
+pi_release:
+ pi_release(disk->pi);
+ return ret;
}
-static int pd_detect(void)
+static int __init pd_init(void)
{
int found = 0, unit, pd_drive_count = 0;
struct pd_unit *disk;
- for (unit = 0; unit < PD_UNITS; unit++) {
- int *parm = *drives[unit];
- struct pd_unit *disk = pd + unit;
- disk->pi = &disk->pia;
- disk->access = 0;
- disk->changed = 1;
- disk->capacity = 0;
- disk->drive = parm[D_SLV];
- snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
- disk->alt_geom = parm[D_GEO];
- disk->standby = parm[D_SBY];
- if (parm[D_PRT])
- pd_drive_count++;
- INIT_LIST_HEAD(&disk->rq_list);
- }
+ if (disable)
+ return -ENODEV;
+
+ if (register_blkdev(major, name))
+ return -ENODEV;
+
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PD_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
- return -1;
+ goto out_unregister_blkdev;
}
- if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- disk = pd;
- if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
- PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
+ for (unit = 0; unit < PD_UNITS; unit++) {
+ int *parm = *drives[unit];
+ if (parm[D_PRT])
+ pd_drive_count++;
+ }
+
+ if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
+ found++;
} else {
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
int *parm = *drives[unit];
if (!parm[D_PRT])
continue;
- if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY],
- pd_scratch, PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
- }
- }
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- if (disk->gd) {
- set_capacity(disk->gd, disk->capacity);
- add_disk(disk->gd);
- found = 1;
+ if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
+ parm[D_UNI], parm[D_PRO], parm[D_DLY]))
+ found++;
}
}
if (!found) {
printk("%s: no valid drive found\n", name);
- pi_unregister_driver(par_drv);
+ goto out_pi_unregister_driver;
}
- return found;
-}
-
-static int __init pd_init(void)
-{
- if (disable)
- goto out1;
-
- if (register_blkdev(major, name))
- goto out1;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PD_VERSION, major, cluster, nice);
- if (!pd_detect())
- goto out2;
return 0;
-out2:
+out_pi_unregister_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
unregister_blkdev(major, name);
-out1:
return -ENODEV;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index d5b9c88ba76f..bf8d0ef41a0a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -214,7 +214,6 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
-static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
@@ -285,45 +284,6 @@ static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
-static void __init pf_init_units(void)
-{
- struct pf_unit *pf;
- int unit;
-
- pf_drive_count = 0;
- for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&pf->tag_set, pf);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&pf->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&pf->rq_list);
- blk_queue_max_segments(disk->queue, cluster);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- pf->disk = disk;
- pf->pi = &pf->pia;
- pf->media_status = PF_NM;
- pf->drive = (*drives[unit])[D_SLV];
- pf->lun = (*drives[unit])[D_LUN];
- snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, pf->name);
- disk->fops = &pf_fops;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- if (!(*drives[unit])[D_PRT])
- pf_drive_count++;
- }
-}
-
static int pf_open(struct block_device *bdev, fmode_t mode)
{
struct pf_unit *pf = bdev->bd_disk->private_data;
@@ -691,9 +651,9 @@ static int pf_identify(struct pf_unit *pf)
return 0;
}
-/* returns 0, with id set if drive is detected
- -1, if drive detection failed
-*/
+/*
+ * returns 0 if a drive is detected, otherwise an error code.
+ */
static int pf_probe(struct pf_unit *pf)
{
if (pf->drive == -1) {
@@ -715,60 +675,7 @@ static int pf_probe(struct pf_unit *pf)
if (!pf_identify(pf))
return 0;
}
- return -1;
-}
-
-static int pf_detect(void)
-{
- struct pf_unit *pf = units;
- int k, unit;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PF_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
- k = 0;
- if (pf_drive_count == 0) {
- if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
- verbose, pf->name)) {
- if (!pf_probe(pf) && pf->disk) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
-
- } else
- for (unit = 0; unit < PF_UNITS; unit++, pf++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pf_scratch, PI_PF, verbose, pf->name)) {
- if (pf->disk && !pf_probe(pf)) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No ATAPI disk detected\n", name);
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_disk(pf->disk);
- blk_mq_free_tag_set(&pf->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return -ENODEV;
}
/* The i/o request engine */
@@ -1014,61 +921,134 @@ static void do_pf_write_done(void)
next_request(0);
}
+static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&pf->tag_set, pf);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+ disk->major = major;
+ disk->first_minor = pf - units;
+ disk->minors = 1;
+ strcpy(disk->disk_name, pf->name);
+ disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->private_data = pf;
+
+ blk_queue_max_segments(disk->queue, cluster);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+
+ INIT_LIST_HEAD(&pf->rq_list);
+ pf->disk = disk;
+ pf->pi = &pf->pia;
+ pf->media_status = PF_NM;
+ pf->drive = (*drives[disk->first_minor])[D_SLV];
+ pf->lun = (*drives[disk->first_minor])[D_LUN];
+ snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
+
+ if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
+ pf_scratch, PI_PF, verbose, pf->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pf_probe(pf);
+ if (ret)
+ goto out_pi_release;
+
+ ret = add_disk(disk);
+ if (ret)
+ goto out_pi_release;
+ pf->present = 1;
+ return 0;
+
+out_pi_release:
+ pi_release(pf->pi);
+out_free_disk:
+ blk_cleanup_disk(pf->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&pf->tag_set);
+ return ret;
+}
+
static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pf_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pf_detect())
- return -ENODEV;
- pf_busy = 0;
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PF_VERSION, major, cluster, nice);
- if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_queue(pf->disk->queue);
- blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
- }
- return -EBUSY;
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
}
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- struct gendisk *disk = pf->disk;
+ for (unit = 0; unit < PF_UNITS; unit++) {
+ if (!(*drives[unit])[D_PRT])
+ pf_drive_count++;
+ }
- if (!pf->present)
- continue;
- disk->private_data = pf;
- add_disk(disk);
+ pf = units;
+ if (pf_drive_count == 0) {
+ if (!pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
+ found++;
+ } else {
+ for (unit = 0; unit < PF_UNITS; unit++, pf++) {
+ int *conf = *drives[unit];
+ if (!conf[D_PRT])
+ continue;
+ if (!pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ verbose))
+ found++;
+ }
+ }
+ if (!found) {
+ printk("%s: No ATAPI disk detected\n", name);
+ goto out_unregister_pi_driver;
}
+ pf_busy = 0;
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
- unregister_blkdev(major, name);
+
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
+ if (!pf->present)
continue;
-
- if (pf->present)
- del_gendisk(pf->disk);
-
- blk_cleanup_queue(pf->disk->queue);
+ del_gendisk(pf->disk);
+ blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
-
- if (pf->present)
- pi_release(pf->pi);
+ pi_release(pf->pi);
}
+
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0f26b2510a75..b53f648302c1 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -703,7 +703,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
struct request *rq;
int ret = 0;
- rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+ rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -726,7 +726,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (scsi_req(rq)->result)
ret = -EIO;
out:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
@@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}
-static blk_qc_t pkt_submit_bio(struct bio *bio)
+static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
- return BLK_QC_T_NONE;
+ return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
- return BLK_QC_T_NONE;
+ return;
end_io:
bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void pkt_init_queue(struct pktcdvd_device *pd)
@@ -2537,6 +2536,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
int i;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
+ struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
pkt_err(pd, "recursive setup not allowed\n");
@@ -2560,10 +2560,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+ sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ if (!sdev) {
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
+ put_device(&sdev->sdev_gendev);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2729,7 +2731,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- add_disk(disk);
+ ret = add_disk(disk);
+ if (ret)
+ goto out_mem2;
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c7b19e128b03..d1ebf193cb9a 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -578,7 +578,7 @@ out:
return next;
}
-static blk_qc_t ps3vram_submit_bio(struct bio *bio)
+static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
- return BLK_QC_T_NONE;
+ return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
-
- return BLK_QC_T_NONE;
}
static const struct block_device_operations ps3vram_fops = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e65c9d706f6f..953fa134cd3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,7 +836,7 @@ struct rbd_options {
u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
-#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
@@ -7054,7 +7054,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rc)
goto err_out_image_lock;
- device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ if (rc)
+ goto err_out_cleanup_disk;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -7068,6 +7070,8 @@ out:
module_put(THIS_MODULE);
return rc;
+err_out_cleanup_disk:
+ rbd_free_disk(rbd_dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index bd4a41afbbfc..2df0657cdf00 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1176,7 +1176,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
@@ -1384,8 +1384,10 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
}
-static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
+ int err;
+
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
dev->gd->minors = 1 << RNBD_PART_BITS;
@@ -1410,7 +1412,11 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
if (!dev->rotational)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
- add_disk(dev->gd);
+ err = add_disk(dev->gd);
+ if (err)
+ blk_cleanup_disk(dev->gd);
+
+ return err;
}
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
@@ -1426,8 +1432,7 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev);
- rnbd_clt_setup_gen_disk(dev, idx);
- return 0;
+ return rnbd_clt_setup_gen_disk(dev, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index c1bc5c0fef71..de5d5a8df81d 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -10,7 +10,7 @@
#define RNBD_PROTO_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/limits.h>
#include <linux/inet.h>
#include <linux/in.h>
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 83636714b8d7..8d9d69f5dfbc 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -935,7 +935,9 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->size8 = 0;
}
- rsxx_attach_dev(card);
+ st = rsxx_attach_dev(card);
+ if (st)
+ goto failed_create_dev;
/************* Setup Debugfs *************/
rsxx_debugfs_dev_new(card);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1cc40b0ea761..dd33f1bdf3b8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -50,7 +50,7 @@ struct rsxx_bio_meta {
static struct kmem_cache *bio_meta_pool;
-static blk_qc_t rsxx_submit_bio(struct bio *bio);
+static void rsxx_submit_bio(struct bio *bio);
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
@@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}
-static blk_qc_t rsxx_submit_bio(struct bio *bio)
+static void rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
@@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio)
if (st)
goto queue_err;
- return BLK_QC_T_NONE;
+ return;
queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
@@ -177,7 +177,6 @@ req_err:
if (st)
bio->bi_status = st;
bio_endio(bio);
- return BLK_QC_T_NONE;
}
/*----------------- Device Setup -------------------*/
@@ -192,6 +191,8 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
+ int err = 0;
+
mutex_lock(&card->dev_lock);
/* The block device requires the stripe size from the config. */
@@ -200,13 +201,17 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
- card->bdev_attached = 1;
+ err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
+ if (err == 0)
+ card->bdev_attached = 1;
}
mutex_unlock(&card->dev_lock);
- return 0;
+ if (err)
+ blk_cleanup_disk(card->gendisk);
+
+ return err;
}
void rsxx_detach_dev(struct rsxx_cardinfo *card)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 7ccc8d2a41bc..821594cd1315 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -16,6 +16,7 @@
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -184,6 +185,7 @@ struct floppy_state {
int track;
int ref_count;
+ bool registered;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
@@ -771,6 +773,20 @@ static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
+static void swim_cleanup_floppy_disk(struct floppy_state *fs)
+{
+ struct gendisk *disk = fs->disk;
+
+ if (!disk)
+ return;
+
+ if (fs->registered)
+ del_gendisk(fs->disk);
+
+ blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -827,7 +843,10 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
- add_disk(swd->unit[drive].disk);
+ err = add_disk(swd->unit[drive].disk);
+ if (err)
+ goto exit_put_disks;
+ swd->unit[drive].registered = true;
}
return 0;
@@ -835,12 +854,7 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
do {
- struct gendisk *disk = swd->unit[drive].disk;
-
- if (!disk)
- continue;
- blk_cleanup_disk(disk);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
} while (drive--);
return err;
}
@@ -909,12 +923,8 @@ static int swim_remove(struct platform_device *dev)
int drive;
struct resource *res;
- for (drive = 0; drive < swd->floppy_count; drive++) {
- del_gendisk(swd->unit[drive].disk);
- blk_cleanup_queue(swd->unit[drive].disk->queue);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
- put_disk(swd->unit[drive].disk);
- }
+ for (drive = 0; drive < swd->floppy_count; drive++)
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 965af0a3e95b..4b91c9aa5892 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -1229,7 +1230,9 @@ static int swim3_attach(struct macio_dev *mdev,
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
- add_disk(disk);
+ rc = add_disk(disk);
+ if (rc)
+ goto out_cleanup_disk;
disks[floppy_count++] = disk;
return 0;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 420cd952ddc4..d1676fe0da1a 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -297,6 +297,7 @@ struct carm_host {
struct work_struct fsm_task;
+ int probe_err;
struct completion probe_comp;
};
@@ -1181,8 +1182,11 @@ static void carm_fsm_task (struct work_struct *work)
struct gendisk *disk = port->disk;
set_capacity(disk, port->capacity);
- add_disk(disk);
- activated++;
+ host->probe_err = add_disk(disk);
+ if (!host->probe_err)
+ activated++;
+ else
+ break;
}
printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
@@ -1192,11 +1196,9 @@ static void carm_fsm_task (struct work_struct *work)
reschedule = 1;
break;
}
-
case HST_PROBE_FINISHED:
complete(&host->probe_comp);
break;
-
case HST_ERROR:
/* FIXME: TODO */
break;
@@ -1507,7 +1509,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_irq;
DPRINTK("waiting for probe_comp\n");
+ host->probe_err = -ENODEV;
wait_for_completion(&host->probe_comp);
+ if (host->probe_err) {
+ rc = host->probe_err;
+ goto err_out_free_irq;
+ }
printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 303caf2d17d0..fc4fc951dba7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -312,7 +312,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -323,7 +323,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
@@ -815,9 +815,17 @@ static int virtblk_probe(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
&blk_size);
- if (!err)
+ if (!err) {
+ err = blk_validate_block_size(blk_size);
+ if (err) {
+ dev_err(&vdev->dev,
+ "virtio_blk: invalid block size: 0x%x\n",
+ blk_size);
+ goto out_cleanup_disk;
+ }
+
blk_queue_logical_block_size(q, blk_size);
- else
+ } else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 33eba3df4dd9..914587aabca0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,7 +98,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
+ err = sync_blockdev(blkif->vbd.bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 72902104f111..8e3983e456f3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
@@ -2385,7 +2386,13 @@ static void blkfront_connect(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i)
kick_pending_request_queues(rinfo);
- device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ if (err) {
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ info->rq = NULL;
+ goto fail;
+ }
info->is_ready = 1;
return;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fcaf2750f68f..a68297fb51a2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1598,22 +1598,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static blk_qc_t zram_submit_bio(struct bio *bio)
+static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto error;
+ bio_io_error(bio);
+ return;
}
__zram_make_request(zram, bio);
- return BLK_QC_T_NONE;
-
-error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bd2e5b1560f5..9877e413fce3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -344,6 +344,12 @@ static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
+static void signal_media_change(struct cdrom_device_info *cdi)
+{
+ cdi->mc_flags = 0x3; /* set media changed bits, on both queues */
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
+}
+
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
@@ -616,6 +622,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= (int) CDO_AUTO_CLOSE;
@@ -864,7 +871,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
- int ret, mmc3_profile;
+ int mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
@@ -874,7 +881,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.quiet = 1;
- if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ if (cdi->ops->generic_packet(cdi, &cgc))
mmc3_profile = 0xffff;
else
mmc3_profile = (buffer[6] << 8) | buffer[7];
@@ -1421,8 +1428,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
cdi->ops->check_events(cdi, 0, slot);
if (slot == CDSL_NONE) {
- /* set media changed bits, on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
return cdrom_load_unload(cdi, -1);
}
@@ -1455,7 +1461,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
slot = curslot;
/* set media changed bits on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
if ((ret = cdrom_load_unload(cdi, slot)))
return ret;
@@ -1521,7 +1527,7 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
cdi->ioctl_events = 0;
if (changed) {
- cdi->mc_flags = 0x3; /* set bit on both queues */
+ signal_media_change(cdi);
ret |= 1;
cdi->media_written = 0;
}
@@ -2336,6 +2342,49 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
return ret;
}
+/*
+ * Media change detection with timing information.
+ *
+ * arg is a pointer to a cdrom_timed_media_change_info struct.
+ * arg->last_media_change may be set by the caller to the timestamp
+ * (in ms) of the last media change it knows about.
+ * Upon successful return, the ioctl sets arg->last_media_change to the
+ * latest media change timestamp known by the kernel/driver, and sets
+ * MEDIA_CHANGED_FLAG in arg->media_flags if that timestamp is more
+ * recent than the timestamp supplied by the caller.
+ */
+static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ int ret;
+ struct cdrom_timed_media_change_info __user *info;
+ struct cdrom_timed_media_change_info tmp_info;
+
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return -ENOSYS;
+
+ info = (struct cdrom_timed_media_change_info __user *)arg;
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_TIMED_MEDIA_CHANGE\n");
+
+ ret = cdrom_ioctl_media_changed(cdi, CDSL_CURRENT);
+ if (ret < 0)
+ return ret;
+
+ if (copy_from_user(&tmp_info, info, sizeof(tmp_info)) != 0)
+ return -EFAULT;
+
+ tmp_info.media_flags = 0;
+ if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
+ tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+
+ tmp_info.last_media_change = cdi->last_media_change_ms;
+
+ if (copy_to_user(info, &tmp_info, sizeof(*info)) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
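From userspace the new ioctl would be driven roughly as follows. This is a hedged sketch: the struct, field names and MEDIA_CHANGED_FLAG come from the uapi header added alongside this patch, the signed 64-bit type of last_media_change is assumed from the in-kernel subtraction above, and the device path is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

/* Example only: returns 1 if media changed since *last_seen_ms, 0 if not,
 * -1 on error; updates *last_seen_ms to the kernel's latest timestamp. */
static int media_changed_since(int fd, long long *last_seen_ms)
{
	struct cdrom_timed_media_change_info info = {
		.last_media_change = *last_seen_ms,
	};

	if (ioctl(fd, CDROM_TIMED_MEDIA_CHANGE, &info) < 0)
		return -1;

	*last_seen_ms = info.last_media_change;
	return (info.media_flags & MEDIA_CHANGED_FLAG) ? 1 : 0;
}

int main(void)
{
	long long last = 0;
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);	/* example device */

	if (fd < 0)
		return 1;
	printf("changed: %d\n", media_changed_since(fd, &last));
	return 0;
}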
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
@@ -3313,6 +3362,8 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
return cdrom_ioctl_eject_sw(cdi, arg);
case CDROM_MEDIA_CHANGED:
return cdrom_ioctl_media_changed(cdi, arg);
+ case CDROM_TIMED_MEDIA_CHANGE:
+ return cdrom_ioctl_timed_media_change(cdi, arg);
case CDROM_SET_OPTIONS:
return cdrom_ioctl_set_options(cdi, arg);
case CDROM_CLEAR_OPTIONS:
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 8e1fe75af93f..d50cc1fd34d5 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -805,9 +805,14 @@ static int probe_gdrom(struct platform_device *devptr)
err = -ENOMEM;
goto probe_fail_free_irqs;
}
- add_disk(gd.disk);
+ err = add_disk(gd.disk);
+ if (err)
+ goto probe_fail_add_disk;
+
return 0;
+probe_fail_add_disk:
+ kfree(gd.toc);
probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 249b31197eea..b061e6b513ed 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -69,12 +69,21 @@ config IPMI_SI
config IPMI_SSIF
tristate 'IPMI SMBus handler (SSIF)'
- select I2C
+ depends on I2C
help
Provides a driver for a SMBus interface to a BMC, meaning that you
have a driver that must be accessed over an I2C bus instead of a
standard interface. This module requires I2C support.
+config IPMI_IPMB
+ tristate 'IPMI IPMB interface'
+ depends on I2C && I2C_SLAVE
+ help
+ Provides a driver for a system running right on the IPMB bus.
+ It supports normal system interface messages to a BMC on the IPMB
+ bus, and it also supports direct messaging on the bus using
+ IPMB direct messages. This module requires I2C support.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 84f47d18007f..7ce790efad92 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IPMI_SI) += ipmi_si.o
obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o
obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_IPMB) += ipmi_ipmb.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6e3d247b55d1..7450904e330a 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -8,13 +8,11 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
-#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -59,8 +57,7 @@
struct bt_bmc {
struct device dev;
struct miscdevice miscdev;
- struct regmap *map;
- int offset;
+ void __iomem *base;
int irq;
wait_queue_head_t queue;
struct timer_list poll_timer;
@@ -69,29 +66,14 @@ struct bt_bmc {
static atomic_t open_count = ATOMIC_INIT(0);
-static const struct regmap_config bt_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
{
- uint32_t val = 0;
- int rc;
-
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val);
- WARN(rc != 0, "regmap_read() failed: %d\n", rc);
-
- return rc == 0 ? (u8) val : 0;
+ return readb(bt_bmc->base + reg);
}
static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
{
- int rc;
-
- rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data);
- WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+ writeb(data, bt_bmc->base + reg);
}
static void clr_rd_ptr(struct bt_bmc *bt_bmc)
@@ -376,18 +358,15 @@ static irqreturn_t bt_bmc_irq(int irq, void *arg)
{
struct bt_bmc *bt_bmc = arg;
u32 reg;
- int rc;
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg);
- if (rc)
- return IRQ_NONE;
+ reg = readl(bt_bmc->base + BT_CR2);
reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
if (!reg)
return IRQ_NONE;
/* ack pending IRQs */
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg);
+ writel(reg, bt_bmc->base + BT_CR2);
wake_up(&bt_bmc->queue);
return IRQ_HANDLED;
@@ -398,6 +377,7 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
{
struct device *dev = &pdev->dev;
int rc;
+ u32 reg;
bt_bmc->irq = platform_get_irq_optional(pdev, 0);
if (bt_bmc->irq < 0)
@@ -417,11 +397,11 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
* will be cleared (along with B2H) when we can write the next
* message to the BT buffer
*/
- rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1,
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY),
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY));
+ reg = readl(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ writel(reg, bt_bmc->base + BT_CR1);
- return rc;
+ return 0;
}
static int bt_bmc_probe(struct platform_device *pdev)
@@ -439,25 +419,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, bt_bmc);
- bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(bt_bmc->map)) {
- void __iomem *base;
-
- /*
- * Assume it's not the MFD-based devicetree description, in
- * which case generate a regmap ourselves
- */
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg);
- bt_bmc->offset = 0;
- } else {
- rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset);
- if (rc)
- return rc;
- }
+ bt_bmc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
mutex_init(&bt_bmc->mutex);
init_waitqueue_head(&bt_bmc->queue);
@@ -483,12 +447,12 @@ static int bt_bmc_probe(struct platform_device *pdev)
add_timer(&bt_bmc->poll_timer);
}
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0,
- (BT_IO_BASE << BT_CR0_IO_BASE) |
+ writel((BT_IO_BASE << BT_CR0_IO_BASE) |
(BT_IRQ << BT_CR0_IRQ) |
BT_CR0_EN_CLR_SLV_RDP |
BT_CR0_EN_CLR_SLV_WRP |
- BT_CR0_ENABLE_IBT);
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
clr_b_busy(bt_bmc);
@@ -508,6 +472,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
static const struct of_device_id bt_bmc_match[] = {
{ .compatible = "aspeed,ast2400-ibt-bmc" },
{ .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
{ },
};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 3dd1d5abb298..d160fa4c73fe 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -247,11 +247,13 @@ static int handle_recv(struct ipmi_file_private *priv,
if (msg->msg.data_len > 0) {
if (rsp->msg.data_len < msg->msg.data_len) {
- rv2 = -EMSGSIZE;
- if (trunc)
+ if (trunc) {
+ rv2 = -EMSGSIZE;
msg->msg.data_len = rsp->msg.data_len;
- else
+ } else {
+ rv = -EMSGSIZE;
goto recv_putback_on_err;
+ }
}
if (copy_to_user(rsp->msg.data,
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
new file mode 100644
index 000000000000..ba0c2d2c6bbe
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Driver to talk to a remote management controller on IPMB.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/ipmi_msgdefs.h>
+#include <linux/ipmi_smi.h>
+
+#define DEVICE_NAME "ipmi-ipmb"
+
+static int bmcaddr = 0x20;
+module_param(bmcaddr, int, 0644);
+MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+
+static unsigned int retry_time_ms = 250;
+module_param(retry_time_ms, uint, 0644);
+MODULE_PARM_DESC(retry_time_ms, "Time between retries, in milliseconds.");
+
+static unsigned int max_retries = 1;
+module_param(max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
+
+/* Add room for the two slave addresses, two checksums, and rqSeq. */
+#define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5)
+
+struct ipmi_ipmb_dev {
+ struct ipmi_smi *intf;
+ struct i2c_client *client;
+
+ struct ipmi_smi_handlers handlers;
+
+ bool ready;
+
+ u8 curr_seq;
+
+ u8 bmcaddr;
+ u32 retry_time_ms;
+ u32 max_retries;
+
+ struct ipmi_smi_msg *next_msg;
+ struct ipmi_smi_msg *working_msg;
+
+ /* Transmit thread. */
+ struct task_struct *thread;
+ struct semaphore wake_thread;
+ struct semaphore got_rsp;
+ spinlock_t lock;
+ bool stopping;
+
+ u8 xmitmsg[IPMB_MAX_MSG_LEN];
+ unsigned int xmitlen;
+
+ u8 rcvmsg[IPMB_MAX_MSG_LEN];
+ unsigned int rcvlen;
+ bool overrun;
+};
+
+static bool valid_ipmb(struct ipmi_ipmb_dev *iidev)
+{
+ u8 *msg = iidev->rcvmsg;
+ u8 netfn;
+
+ if (iidev->overrun)
+ return false;
+
+ /* Minimum message size. */
+ if (iidev->rcvlen < 7)
+ return false;
+
+ /* Is it a response? */
+ netfn = msg[1] >> 2;
+ if (netfn & 1) {
+ /* Response messages have an added completion code. */
+ if (iidev->rcvlen < 8)
+ return false;
+ }
+
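+ /* Both the header checksum and the body checksum must be valid. */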
+ if (ipmb_checksum(msg, 3) != 0)
+ return false;
+ if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0)
+ return false;
+
+ return true;
+}
+
+static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev)
+{
+ struct ipmi_smi_msg *imsg = NULL;
+ u8 *msg = iidev->rcvmsg;
+ bool is_cmd;
+ unsigned long flags;
+
+ if (iidev->rcvlen == 0)
+ return;
+ if (!valid_ipmb(iidev))
+ goto done;
+
+ is_cmd = ((msg[1] >> 2) & 1) == 0;
+
+ if (is_cmd) {
+ /* Ignore commands until we are up. */
+ if (!iidev->ready)
+ goto done;
+
+ /* It's a command, allocate a message for it. */
+ imsg = ipmi_alloc_smi_msg();
+ if (!imsg)
+ goto done;
+ imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ imsg->data_size = 0;
+ } else {
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->working_msg) {
+ u8 seq = msg[4] >> 2;
+ bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1;
+
+ /*
+ * Responses should carry the sequence we sent
+ * them with. If it's a transmitted response,
+ * ignore it. And if the message hasn't been
+ * transmitted, ignore it.
+ */
+ if (!xmit_rsp && seq == iidev->curr_seq) {
+ iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f;
+
+ imsg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ }
+
+ if (!imsg)
+ goto done;
+
+ if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Keep the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4);
+ imsg->rsp_size = iidev->rcvlen - 3;
+ } else {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Skip the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6);
+ imsg->rsp_size = iidev->rcvlen - 5;
+ }
+ ipmi_smi_msg_received(iidev->intf, imsg);
+ if (!is_cmd)
+ up(&iidev->got_rsp);
+
+done:
+ iidev->overrun = false;
+ iidev->rcvlen = 0;
+}
+
+/*
+ * The IPMB protocol only supports i2c writes so there is no need to
+ * support I2C_SLAVE_READ* events, except to know if the other end has
+ * issued a read without going to stop mode.
+ */
+static int ipmi_ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ ipmi_ipmb_check_msg_done(iidev);
+ /*
+ * First byte is the slave address, to ease the checksum
+ * calculation.
+ */
+ iidev->rcvmsg[0] = client->addr << 1;
+ iidev->rcvlen = 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (iidev->rcvlen >= sizeof(iidev->rcvmsg))
+ iidev->overrun = true;
+ else
+ iidev->rcvmsg[iidev->rcvlen++] = *val;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_STOP:
+ ipmi_ipmb_check_msg_done(iidev);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ break;
+ }
+
+ return 0;
+}
+
+static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg, u8 cc)
+{
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response being sent, so we need to return a
+ * response to that response. Fake a send msg command
+ * response with channel 0. This will always be IPMB
+ * direct.
+ */
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2;
+ msg->data[3] = IPMI_SEND_MSG_CMD;
+ msg->data[4] = cc;
+ msg->data_size = 5;
+ }
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = msg->data[2];
+ msg->rsp[3] = msg->data[3];
+ msg->rsp[4] = cc;
+ msg->rsp_size = 5;
+ } else {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cc;
+ msg->rsp_size = 3;
+ }
+ ipmi_smi_msg_received(iidev->intf, msg);
+}
+
+static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg)
+{
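+ /*
+ * Lay out the IPMB header and copy in the payload. Direct messages
+ * supply their own destination; otherwise the configured BMC address
+ * is used.
+ */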
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ iidev->xmitmsg[0] = msg->data[1];
+ iidev->xmitmsg[1] = msg->data[0];
+ memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2);
+ iidev->xmitlen = msg->data_size + 2;
+ } else {
+ iidev->xmitmsg[0] = iidev->bmcaddr;
+ iidev->xmitmsg[1] = msg->data[0];
+ iidev->xmitmsg[4] = 0;
+ memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
+ iidev->xmitlen = msg->data_size + 4;
+ }
+ iidev->xmitmsg[3] = iidev->client->addr << 1;
+ if (((msg->data[0] >> 2) & 1) == 0)
+ /* If it's a command, put in our own sequence number. */
+ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
+ (iidev->curr_seq << 2));
+
+ /* Now add on the final checksums. */
+ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2);
+ iidev->xmitmsg[iidev->xmitlen] =
+ ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3);
+ iidev->xmitlen++;
+}
+
+static int ipmi_ipmb_thread(void *data)
+{
+ struct ipmi_ipmb_dev *iidev = data;
+
+ while (!kthread_should_stop()) {
+ long ret;
+ struct i2c_msg i2c_msg;
+ struct ipmi_smi_msg *msg = NULL;
+ unsigned long flags;
+ unsigned int retries = 0;
+
+ /* Wait for a message to send */
+ ret = down_interruptible(&iidev->wake_thread);
+ if (iidev->stopping)
+ break;
+ if (ret)
+ continue;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->next_msg) {
+ msg = iidev->next_msg;
+ iidev->next_msg = NULL;
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ if (!msg)
+ continue;
+
+ ipmi_ipmb_format_for_xmit(iidev, msg);
+
+retry:
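+ /* Reject anything too long to send in a single I2C transfer. */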
+ i2c_msg.len = iidev->xmitlen - 1;
+ if (i2c_msg.len > 32) {
+ ipmi_ipmb_send_response(iidev, msg,
+ IPMI_REQ_LEN_EXCEEDED_ERR);
+ continue;
+ }
+
+ i2c_msg.addr = iidev->xmitmsg[0] >> 1;
+ i2c_msg.flags = 0;
+ i2c_msg.buf = iidev->xmitmsg + 1;
+
+ /* Rely on i2c_transfer for a barrier. */
+ iidev->working_msg = msg;
+
+ ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1);
+
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response, nothing will be returned
+ * by the other end.
+ */
+
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg,
+ ret < 0 ? IPMI_BUS_ERR : 0);
+ continue;
+ }
+ if (ret < 0) {
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR);
+ continue;
+ }
+
+ /* A command was sent, wait for its response. */
+ ret = down_timeout(&iidev->got_rsp,
+ msecs_to_jiffies(iidev->retry_time_ms));
+
+ /*
+ * Grab the message if we can. If the handler hasn't
+ * already handled it, the message will still be there.
+ */
+ spin_lock_irqsave(&iidev->lock, flags);
+ msg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ if (!msg && ret) {
+ /*
+ * If working_msg is not set and we timed out,
+ * that means the message was grabbed by
+ * check_msg_done before we could grab it
+ * here. Wait again for check_msg_done to up
+ * the semaphore.
+ */
+ down(&iidev->got_rsp);
+ } else if (msg && ++retries <= iidev->max_retries) {
+ spin_lock_irqsave(&iidev->lock, flags);
+ iidev->working_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ goto retry;
+ }
+
+ if (msg)
+ ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR);
+ }
+
+ if (iidev->next_msg)
+ /* Return an unspecified error. */
+ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff);
+
+ return 0;
+}
+
+static int ipmi_ipmb_start_processing(void *send_info,
+ struct ipmi_smi *new_intf)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ iidev->intf = new_intf;
+ iidev->ready = true;
+ return 0;
+}
+
+static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->thread) {
+ struct task_struct *t = iidev->thread;
+
+ iidev->thread = NULL;
+ iidev->stopping = true;
+ up(&iidev->wake_thread);
+ up(&iidev->got_rsp);
+ kthread_stop(t);
+ }
+}
+
+static void ipmi_ipmb_shutdown(void *send_info)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ BUG_ON(iidev->next_msg);
+
+ iidev->next_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ up(&iidev->wake_thread);
+}
+
+static void ipmi_ipmb_request_events(void *send_info)
+{
+ /* We don't fetch events here. */
+}
+
+static int ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ if (iidev->client) {
+ iidev->client = NULL;
+ i2c_slave_unregister(client);
+ }
+ ipmi_ipmb_stop_thread(iidev);
+
+ return 0;
+}
+
+static int ipmi_ipmb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ipmi_ipmb_dev *iidev;
+ int rv;
+
+ iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
+ if (!iidev)
+ return -ENOMEM;
+
+ if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0)
+ iidev->bmcaddr = bmcaddr;
+ if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) {
+ /* Can't have the write bit set. */
+ dev_notice(&client->dev,
+ "Invalid bmc address value %2.2x\n", iidev->bmcaddr);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "retry-time",
+ &iidev->retry_time_ms) != 0)
+ iidev->retry_time_ms = retry_time_ms;
+
+ if (of_property_read_u32(dev->of_node, "max-retries",
+ &iidev->max_retries) != 0)
+ iidev->max_retries = max_retries;
+
+ i2c_set_clientdata(client, iidev);
+ client->flags |= I2C_CLIENT_SLAVE;
+
+ rv = i2c_slave_register(client, ipmi_ipmb_slave_cb);
+ if (rv)
+ return rv;
+
+ iidev->client = client;
+
+ iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
+ iidev->handlers.start_processing = ipmi_ipmb_start_processing;
+ iidev->handlers.shutdown = ipmi_ipmb_shutdown;
+ iidev->handlers.sender = ipmi_ipmb_sender;
+ iidev->handlers.request_events = ipmi_ipmb_request_events;
+
+ spin_lock_init(&iidev->lock);
+ sema_init(&iidev->wake_thread, 0);
+ sema_init(&iidev->got_rsp, 0);
+
+ iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
+ "kipmb%4.4x", client->addr);
+ if (IS_ERR(iidev->thread)) {
+ rv = PTR_ERR(iidev->thread);
+ dev_notice(&client->dev,
+ "Could not start kernel thread: error %d\n", rv);
+ goto out_err;
+ }
+
+ rv = ipmi_register_smi(&iidev->handlers,
+ iidev,
+ &client->dev,
+ iidev->bmcaddr);
+ if (rv)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipmi_ipmb_remove(client);
+ return rv;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_ipmb_match[] = {
+ { .type = "ipmi", .compatible = DEVICE_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
+#else
+#define of_ipmi_ipmb_match NULL
+#endif
+
+static const struct i2c_device_id ipmi_ipmb_id[] = {
+ { DEVICE_NAME, 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
+
+static struct i2c_driver ipmi_ipmb_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_ipmb_match,
+ },
+ .probe = ipmi_ipmb_probe,
+ .remove = ipmi_ipmb_remove,
+ .id_table = ipmi_ipmb_id,
+};
+module_i2c_driver(ipmi_ipmb_driver);
+
+MODULE_AUTHOR("Corey Minyard");
+MODULE_DESCRIPTION("IPMI IPMB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e96cb5c4f97a..deed355422f4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -653,6 +653,11 @@ static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
+static int is_ipmb_direct_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
+}
+
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
@@ -805,6 +810,17 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
+ if (is_ipmb_direct_addr(addr1)) {
+ struct ipmi_ipmb_direct_addr *daddr1
+ = (struct ipmi_ipmb_direct_addr *) addr1;
+ struct ipmi_ipmb_direct_addr *daddr2
+ = (struct ipmi_ipmb_direct_addr *) addr2;
+
+ return daddr1->slave_addr == daddr2->slave_addr &&
+ daddr1->rq_lun == daddr2->rq_lun &&
+ daddr1->rs_lun == daddr2->rs_lun;
+ }
+
if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
@@ -843,6 +859,23 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
return 0;
}
+ if (is_ipmb_direct_addr(addr)) {
+ struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
+
+ if (addr->channel != 0)
+ return -EINVAL;
+ if (len < sizeof(struct ipmi_ipmb_direct_addr))
+ return -EINVAL;
+
+ if (daddr->slave_addr & 0x01)
+ return -EINVAL;
+ if (daddr->rq_lun >= 4)
+ return -EINVAL;
+ if (daddr->rs_lun >= 4)
+ return -EINVAL;
+ return 0;
+ }
+
if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
@@ -862,6 +895,9 @@ unsigned int ipmi_addr_length(int addr_type)
|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
return sizeof(struct ipmi_ipmb_addr);
+ if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
+ return sizeof(struct ipmi_ipmb_direct_addr);
+
if (addr_type == IPMI_LAN_ADDR_TYPE)
return sizeof(struct ipmi_lan_addr);
@@ -1710,7 +1746,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-static unsigned char
+unsigned char
ipmb_checksum(unsigned char *data, int size)
{
unsigned char csum = 0;
@@ -1720,6 +1756,7 @@ ipmb_checksum(unsigned char *data, int size)
return -csum;
}
+EXPORT_SYMBOL(ipmb_checksum);
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
struct kernel_ipmi_msg *msg,
@@ -2051,6 +2088,58 @@ out_err:
return rv;
}
+static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun)
+{
+ struct ipmi_ipmb_direct_addr *daddr;
+ bool is_cmd = !(recv_msg->msg.netfn & 0x1);
+
+ if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
+ return -EAFNOSUPPORT;
+
+ /* Responses must have a completion code. */
+ if (!is_cmd && msg->data_len < 1) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ daddr = (struct ipmi_ipmb_direct_addr *) addr;
+ if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ smi_msg->msgid = msgid;
+
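+ /*
+ * The LUN paired with the netfn is the responder's for a command
+ * and the requester's for a response.
+ */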
+ if (is_cmd) {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
+ } else {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
+ }
+ smi_msg->data[1] = daddr->slave_addr;
+ smi_msg->data[3] = msg->cmd;
+
+ memcpy(smi_msg->data + 4, msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 4;
+
+ smi_msg->user_data = recv_msg;
+
+ return 0;
+}
+
static int i_ipmi_req_lan(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
@@ -2240,6 +2329,9 @@ static int i_ipmi_request(struct ipmi_user *user,
rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
source_address, source_lun,
retries, retry_time_ms);
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
} else if (is_lan_addr(addr)) {
rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
source_lun, retries, retry_time_ms);
@@ -2369,6 +2461,13 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
return;
}
+ if (msg->msg.data[0]) {
+ dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
+ msg->msg.data[0]);
+ intf->bmc->dyn_id_set = 0;
+ goto out;
+ }
+
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
@@ -2384,7 +2483,7 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
smp_wmb();
intf->bmc->dyn_id_set = 1;
}
-
+out:
wake_up(&intf->waitq);
}
@@ -2617,7 +2716,7 @@ static ssize_t device_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", id.device_id);
+ return sysfs_emit(buf, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);
@@ -2633,7 +2732,7 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
+ return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);
@@ -2648,7 +2747,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
if (rv)
return rv;
- return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
+ return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);
@@ -2664,7 +2763,7 @@ static ssize_t firmware_revision_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+ return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);
@@ -2681,7 +2780,7 @@ static ssize_t ipmi_version_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%u\n",
+ return sysfs_emit(buf, "%u.%u\n",
ipmi_version_major(&id),
ipmi_version_minor(&id));
}
@@ -2699,7 +2798,7 @@ static ssize_t add_dev_support_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
+ return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
@@ -2716,7 +2815,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
+ return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);
@@ -2732,7 +2831,7 @@ static ssize_t product_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
+ return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);
@@ -2748,7 +2847,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
id.aux_firmware_revision[3],
id.aux_firmware_revision[2],
id.aux_firmware_revision[1],
@@ -2770,7 +2869,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
if (!guid_set)
return -ENOENT;
- return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
+ return sysfs_emit(buf, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);
@@ -3794,6 +3893,123 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
return rv;
}
+static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_direct_addr *daddr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned char netfn = msg->rsp[0] >> 2;
+ unsigned char cmd = msg->rsp[3];
+
+ rcu_read_lock();
+ /* We always use channel 0 for direct messages. */
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
+ msg->data[1] = msg->rsp[2];
+ msg->data[2] = msg->rsp[4] & ~0x3;
+ msg->data[3] = cmd;
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_ipmb_direct_addr *daddr;
+
+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
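+ /* Recover the sender's slave address and LUNs from the received header. */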
+ daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rq_lun = msg->rsp[0] & 3;
+ daddr->rs_lun = msg->rsp[2] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
@@ -4219,18 +4435,40 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
- int requeue;
+ int requeue = 0;
int chan;
+ unsigned char cc;
+ bool is_cmd = !((msg->rsp[0] >> 2) & 1);
pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
- if ((msg->data_size >= 2)
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+return_unspecified:
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ /* commands must have at least 3 bytes, responses 4. */
+ if (is_cmd && (msg->rsp_size < 3)) {
+ ipmi_inc_stat(intf, invalid_commands);
+ goto out;
+ }
+ if (!is_cmd && (msg->rsp_size < 4))
+ goto return_unspecified;
+ } else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
if (intf->in_shutdown)
- goto free_msg;
+ goto out;
/*
* This is the local response to a command send, start
@@ -4265,21 +4503,6 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
-free_msg:
- requeue = 0;
- goto out;
-
- } else if (msg->rsp_size < 2) {
- /* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
-
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
@@ -4291,39 +4514,46 @@ free_msg:
(msg->data[0] >> 2) | 1, msg->data[1],
msg->rsp[0] >> 2, msg->rsp[1]);
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
+ goto return_unspecified;
}
- if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
- && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ if ((msg->data[0] >> 2) & 1) {
+ /* It's a response to a sent response. */
+ chan = 0;
+ cc = msg->rsp[4];
+ goto process_response_response;
+ }
+ if (is_cmd)
+ requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
+ else
+ requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
- struct ipmi_recv_msg *recv_msg = msg->user_data;
-
- requeue = 0;
- if (msg->rsp_size < 2)
- /* Message is too small to be correct. */
- goto out;
+ struct ipmi_recv_msg *recv_msg;
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
goto out;
+ cc = msg->rsp[2];
+process_response_response:
+ recv_msg = msg->user_data;
+
+ requeue = 0;
if (!recv_msg)
goto out;
recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg_data[0] = cc;
recv_msg->msg.data_len = 1;
- recv_msg->msg_data[0] = msg->rsp[2];
deliver_local_response(intf, recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
@@ -4789,7 +5019,9 @@ static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
atomic_dec(&smi_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
@@ -4808,7 +5040,9 @@ EXPORT_SYMBOL(ipmi_alloc_smi_msg);
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
atomic_dec(&recv_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
@@ -4826,7 +5060,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user)
+ if (msg->user && !oops_in_progress)
kref_put(&msg->user->refcount, free_user);
msg->done(msg);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6f3272b58ced..64dedb3ef8ec 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1603,7 +1603,7 @@ static ssize_t name##_show(struct device *dev, \
{ \
struct smi_info *smi_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
+ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR_RO(name)
@@ -1613,7 +1613,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR_RO(type);
@@ -1624,7 +1624,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
- return snprintf(buf, 10, "%d\n", enabled);
+ return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);
@@ -1646,7 +1646,7 @@ static ssize_t params_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 200,
+ return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi_info->io.si_type],
addr_space_to_str[smi_info->io.addr_space],
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 20d5af92966d..0c62e578749e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1190,7 +1190,7 @@ static ssize_t ipmi_##name##_show(struct device *dev, \
{ \
struct ssif_info *ssif_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\
} \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
@@ -1198,7 +1198,7 @@ static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, 10, "ssif\n");
+ return sysfs_emit(buf, "ssif\n");
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index e4ff3b50de7f..883b4a341012 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -342,13 +342,17 @@ static atomic_t msg_tofree = ATOMIC_INIT(0);
static DECLARE_COMPLETION(msg_wait);
static void msg_free_smi(struct ipmi_smi_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
static void msg_free_recv(struct ipmi_recv_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
static struct ipmi_smi_msg smi_msg = {
.done = msg_free_smi
@@ -434,8 +438,10 @@ static int _ipmi_set_timeout(int do_heartbeat)
rv = __ipmi_set_timeout(&smi_msg,
&recv_msg,
&send_heartbeat_now);
- if (rv)
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
return rv;
+ }
wait_for_completion(&msg_wait);
@@ -497,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -507,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -531,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = __ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
@@ -580,6 +586,7 @@ restart:
&recv_msg,
1);
if (rv) {
+ atomic_set(&msg_tofree, 0);
pr_warn("heartbeat send failure: %d\n", rv);
return rv;
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7948cabde50b..7e2067628a6c 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -73,10 +73,12 @@ static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
struct serio *port;
priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
/* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!(priv && port))
+ if (!port)
return -ENOMEM;
port->id.type = SERIO_8042;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 8f1bce0b4fe5..adaec8fd4b16 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -116,8 +116,9 @@ struct cm4000_dev {
wait_queue_head_t atrq; /* wait for ATR valid */
wait_queue_head_t readq; /* used by write to wake blk.read */
- /* warning: do not move this fields.
+ /* warning: do not move this struct group.
* initialising to zero depends on it - see ZERO_DEV below. */
+ struct_group(init,
unsigned char atr_csum;
unsigned char atr_len_retry;
unsigned short atr_len;
@@ -140,12 +141,10 @@ struct cm4000_dev {
struct timer_list timer; /* used to keep monitor running */
int monitor_running;
+ );
};
-#define ZERO_DEV(dev) \
- memset(&dev->atr_csum,0, \
- sizeof(struct cm4000_dev) - \
- offsetof(struct cm4000_dev, atr_csum))
+#define ZERO_DEV(dev) memset(&((dev)->init), 0, sizeof((dev)->init))
static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index d6ba644f6b00..4a5516406c22 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -76,7 +76,7 @@ config TCG_TIS_SPI_CR50
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
- depends on ARCH_SYNQUACER
+ depends on ARCH_SYNQUACER || COMPILE_TEST
select TCG_TIS_CORE
help
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 784b8b3cb903..97e916856cf3 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -455,6 +455,9 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
return 0;
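+ /* Reject counts that would overflow the length check below. */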
+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
+ return -EFAULT;
+
if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
return -EFAULT;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 69579efb247b..b2659a4c4016 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -48,6 +48,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
unsigned long timeout, wait_queue_head_t *queue,
bool check_cancel)
{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop;
long rc;
u8 status;
@@ -80,8 +81,8 @@ again:
}
} else {
do {
- usleep_range(TPM_TIMEOUT_USECS_MIN,
- TPM_TIMEOUT_USECS_MAX);
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -945,7 +946,22 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ goto out_err;
+
+ priv->manufacturer_id = vendor;
+
+ if (priv->manufacturer_id == TPM_VID_ATML &&
+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML;
+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ }
+
dev_set_drvdata(&chip->dev, priv);
if (is_bsw()) {
@@ -988,12 +1004,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (rc)
goto out_err;
- rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
- if (rc < 0)
- goto out_err;
-
- priv->manufacturer_id = vendor;
-
rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
if (rc < 0)
goto out_err;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b2a3c6c72882..3be24f221e32 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,6 +54,8 @@ enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
/* Some timeout values are needed before it is known whether the chip is
@@ -98,6 +100,8 @@ struct tpm_tis_data {
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
unsigned short rng_quality;
+ unsigned int timeout_min; /* usecs */
+ unsigned int timeout_max; /* usecs */
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 54584b4b00d1..aaa59a00eeae 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -267,6 +267,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = {
{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
{ "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
{ "cr50", (unsigned long)cr50_spi_probe },
{}
};
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 0506046a5f4b..510a9965633b 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -58,11 +58,8 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
long rate;
int i;
- if (rate_hw && rate_ops && rate_ops->determine_rate) {
- __clk_hw_set_clk(rate_hw, hw);
- return rate_ops->determine_rate(rate_hw, req);
- } else if (rate_hw && rate_ops && rate_ops->round_rate &&
- mux_hw && mux_ops && mux_ops->set_parent) {
+ if (rate_hw && rate_ops && rate_ops->round_rate &&
+ mux_hw && mux_ops && mux_ops->set_parent) {
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
@@ -107,6 +104,9 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->rate = best_rate;
return 0;
+ } else if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ __clk_hw_set_clk(rate_hw, hw);
+ return rate_ops->determine_rate(rate_hw, req);
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->determine_rate(mux_hw, req);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 0f5e3983951a..f65e31bab9ae 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -24,6 +24,7 @@ config I8253_LOCK
config OMAP_DM_TIMER
bool
+ select TIMER_OF
config CLKBLD_I8253
def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@@ -418,12 +419,14 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
+ depends on ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on HAS_IOMEM
+ depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index de93dd1a8c7b..cb18524cc13d 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -225,7 +225,7 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
- write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);
sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
@@ -245,7 +245,7 @@ static void arc_timer_event_setup(unsigned int cycles)
write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
- write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}
@@ -294,7 +294,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* explicitly clears IP bit
* 2. Re-arm interrupt if periodic by writing to IE bit [0]
*/
- write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);
evt->event_handler(evt);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index be6d741d404c..9a04eacc4412 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,23 +44,29 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x08
-#define CNTVCT_HI 0x0c
+#define CNTVCT_LO 0x00
+#define CNTPCT_LO 0x08
#define CNTFRQ 0x10
-#define CNTP_TVAL 0x28
+#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
-#define CNTV_TVAL 0x38
+#define CNTV_CVAL_LO 0x30
#define CNTV_CTL 0x3c
-static unsigned arch_timers_present __initdata;
+/*
+ * The minimum amount of time a generic counter is guaranteed to not roll over
+ * (40 years)
+ */
+#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static void __iomem *arch_counter_base __ro_after_init;
+static unsigned arch_timers_present __initdata;
struct arch_timer {
void __iomem *base;
struct clock_event_device evt;
};
+static struct arch_timer *arch_timer_mem __ro_after_init;
+
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
@@ -96,32 +102,57 @@ static int __init early_evtstrm_cfg(char *buf)
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
+ * Makes an educated guess at a valid counter width based on the Generic Timer
+ * specification. Of note:
+ * 1) the system counter is at least 56 bits wide
+ * 2) a roll-over time of not less than 40 years
+ *
+ * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
+ */
+static int arch_counter_get_width(void)
+{
+ u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
+
+ /* guarantee the returned width is within the valid range */
+ return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
+}
+
+/*
* Architected system timer support.
*/
static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
struct clock_event_device *clk)
{
if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTP_CTL);
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTP_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTV_CTL);
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTV_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else {
arch_timer_reg_write_cp15(access, reg, val);
@@ -140,9 +171,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTP_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
@@ -150,9 +180,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTV_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else {
val = arch_timer_reg_read_cp15(access, reg);
@@ -205,13 +234,11 @@ static struct clocksource clocksource_counter = {
.id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter __ro_after_init = {
.read = arch_counter_read_cc,
- .mask = CLOCKSOURCE_MASK(56),
};
struct ate_acpi_oem_info {
@@ -239,16 +266,6 @@ struct ate_acpi_oem_info {
_new; \
})
-static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntv_tval_el0);
-}
-
static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
return __fsl_a008585_read_reg(cntpct_el0);
@@ -285,16 +302,6 @@ static u64 notrace fsl_a008585_read_cntvct_el0(void)
_new; \
})
-static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntv_tval_el0);
-}
-
static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
return __hisi_161010101_read_reg(cntpct_el0);
@@ -379,16 +386,6 @@ static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
return __sun50i_a64_read_reg(cntvct_el0);
}
-
-static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
-{
- return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
-}
-
-static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
-{
- return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
-}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
@@ -397,7 +394,7 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
-static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+static void erratum_set_next_event_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -418,17 +415,17 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
-static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
-static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -438,12 +435,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "fsl,erratum-a008585",
.desc = "Freescale erratum a005858",
- .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
- .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
@@ -451,23 +446,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "hisilicon,erratum-161010101",
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
{
.match_type = ate_match_acpi_oem_info,
.id = hisi_161010101_oem_info,
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
@@ -484,12 +475,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "allwinner,erratum-unknown1",
.desc = "Allwinner erratum UNKNOWN1",
- .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
- .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
@@ -727,10 +716,18 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
+ u64 cnt;
+
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+
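+ /*
+ * CVAL is an absolute comparator value, so add the current counter
+ * value to the requested delta.
+ */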
+ if (access == ARCH_TIMER_PHYS_ACCESS)
+ cnt = __arch_counter_get_cntpct();
+ else
+ cnt = __arch_counter_get_cntvct();
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
@@ -748,23 +745,79 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
+static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+{
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
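+ /*
+ * Re-read the high word after the low word to detect a carry
+ * between the two 32-bit reads.
+ */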
+ do {
+ cnt_hi = readl_relaxed(t->base + offset_lo + 4);
+ cnt_lo = readl_relaxed(t->base + offset_lo);
+ tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static __always_inline void set_next_event_mem(const int access, unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
+ cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
+ else
+ cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
return 0;
}
+static u64 __arch_timer_check_delta(void)
+{
+#ifdef CONFIG_ARM64
+ const struct midr_range broken_cval_midrs[] = {
+ /*
+ * XGene-1 implements CVAL in terms of TVAL, meaning
+ * that the maximum timer range is 32bit. Shame on them.
+ */
+ MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+ APM_CPU_PART_POTENZA)),
+ {},
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
+ return CLOCKSOURCE_MASK(32);
+ }
+#endif
+ return CLOCKSOURCE_MASK(arch_counter_get_width());
+}
+
static void __arch_timer_setup(unsigned type,
struct clock_event_device *clk)
{
+ u64 max_delta;
+
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
@@ -796,6 +849,7 @@ static void __arch_timer_setup(unsigned type,
}
clk->set_next_event = sne;
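+ /* CPUs with a broken CVAL implementation are limited to a 32-bit delta. */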
+ max_delta = __arch_timer_check_delta();
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -812,11 +866,13 @@ static void __arch_timer_setup(unsigned type,
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
+
+ max_delta = CLOCKSOURCE_MASK(56);
}
clk->set_state_shutdown(clk);
- clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+ clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
static void arch_timer_evtstrm_enable(int divider)
@@ -986,15 +1042,7 @@ bool arch_timer_evtstrm_available(void)
static u64 arch_counter_get_cntvct_mem(void)
{
- u32 vct_lo, vct_hi, tmp_hi;
-
- do {
- vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
- tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- } while (vct_hi != tmp_hi);
-
- return ((u64) vct_hi << 32) | vct_lo;
+ return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
static struct arch_timer_kvm_info arch_timer_kvm_info;
@@ -1007,6 +1055,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
static void __init arch_counter_register(unsigned type)
{
u64 start_count;
+ int width;
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
@@ -1031,6 +1080,10 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ width = arch_counter_get_width();
+ clocksource_counter.mask = CLOCKSOURCE_MASK(width);
+ cyclecounter.mask = CLOCKSOURCE_MASK(width);
+
if (!arch_counter_suspend_stop)
clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
@@ -1040,8 +1093,7 @@ static void __init arch_counter_register(unsigned type)
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
- /* 56 bits minimum, so we assume worst case rollover */
- sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+ sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -1182,25 +1234,25 @@ static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
int ret;
irq_handler_t func;
- struct arch_timer *t;
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
+ arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
+ if (!arch_timer_mem)
return -ENOMEM;
- t->base = base;
- t->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
+ arch_timer_mem->base = base;
+ arch_timer_mem->evt.irq = irq;
+ __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
if (arch_timer_mem_use_virtual)
func = arch_timer_handler_virt_mem;
else
func = arch_timer_handler_phys_mem;
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+ ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
if (ret) {
pr_err("Failed to request mem timer irq\n");
- kfree(t);
+ kfree(arch_timer_mem);
+ arch_timer_mem = NULL;
}
return ret;
@@ -1458,7 +1510,6 @@ arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
return ret;
}
- arch_counter_base = base;
arch_timers_present |= ARCH_TIMER_TYPE_MEM;
return 0;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index e89f9e0094b4..c7816c83e324 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -222,8 +222,10 @@ struct chcr_authenc_ctx {
};
struct __aead_ctx {
- struct chcr_gcm_ctx gcm[0];
- struct chcr_authenc_ctx authenc[];
+ union {
+ DECLARE_FLEX_ARRAY(struct chcr_gcm_ctx, gcm);
+ DECLARE_FLEX_ARRAY(struct chcr_authenc_ctx, authenc);
+ };
};
struct chcr_aead_ctx {
@@ -245,9 +247,11 @@ struct hmac_ctx {
};
struct __crypto_ctx {
- struct hmac_ctx hmacctx[0];
- struct ablk_ctx ablkctx[0];
- struct chcr_aead_ctx aeadctx[];
+ union {
+ DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx);
+ DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx);
+ DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx);
+ };
};
struct chcr_context {
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 53927f9fa77e..9db0c402c9ce 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -75,52 +75,27 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
-#define CXL_COMPONENT_REGS() \
- void __iomem *hdm_decoder
-
-#define CXL_DEVICE_REGS() \
- void __iomem *status; \
- void __iomem *mbox; \
- void __iomem *memdev
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_COMPONENT_REGS - Common set of CXL Component register block base pointers
- * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
- */
-struct cxl_component_regs {
- CXL_COMPONENT_REGS();
-};
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_DEVICE_REGS - Common set of CXL Device register block base pointers
- * @status: CXL 2.0 8.2.8.3 Device Status Registers
- * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
- * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
- */
-struct cxl_device_regs {
- CXL_DEVICE_REGS();
-};
-
/*
- * Note, the anonymous union organization allows for per
- * register-block-type helper routines, without requiring block-type
- * agnostic code to include the prefix.
+ * Using struct_group() allows for per register-block-type helper routines,
+ * without requiring block-type agnostic code to include the prefix.
*/
struct cxl_regs {
- union {
- struct {
- CXL_COMPONENT_REGS();
- };
- struct cxl_component_regs component;
- };
- union {
- struct {
- CXL_DEVICE_REGS();
- };
- struct cxl_device_regs device_regs;
- };
+ /*
+ * Common set of CXL Component register block base pointers
+ * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
+ */
+ struct_group_tagged(cxl_component_regs, component,
+ void __iomem *hdm_decoder;
+ );
+ /*
+ * Common set of CXL Device register block base pointers
+ * @status: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
+ */
+ struct_group_tagged(cxl_device_regs, device_regs,
+ void __iomem *status, *mbox, *memdev;
+ );
};
struct cxl_reg_map {
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 4a2a796e348c..52d04641e361 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -742,8 +742,7 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
dma_addr_t dma;
int i;
- sw_desc = kzalloc(sizeof(*sw_desc) +
- nb_hw_desc * sizeof(struct pxad_desc_hw *),
+ sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
GFP_NOWAIT);
if (!sw_desc)
return NULL;
diff --git a/drivers/edac/al_mc_edac.c b/drivers/edac/al_mc_edac.c
index 7d4f396c27b5..178b9e581a72 100644
--- a/drivers/edac/al_mc_edac.c
+++ b/drivers/edac/al_mc_edac.c
@@ -238,11 +238,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
if (!mci)
return -ENOMEM;
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
- if (ret) {
- edac_mc_free(mci);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_free, mci);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, mci);
al_mc = mci->pvt_info;
@@ -293,11 +291,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
- if (ret) {
- edac_mc_del_mc(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
+ if (ret)
return ret;
- }
if (al_mc->irq_ue > 0) {
ret = devm_request_irq(&pdev->dev,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 99b06a3e8fb1..4fce75013674 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1065,12 +1065,14 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
#define CS_ODD_PRIMARY BIT(1)
#define CS_EVEN_SECONDARY BIT(2)
#define CS_ODD_SECONDARY BIT(3)
+#define CS_3R_INTERLEAVE BIT(4)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
+ u8 base, count = 0;
int cs_mode = 0;
if (csrow_enabled(2 * dimm, ctrl, pvt))
@@ -1083,6 +1085,20 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
+ /*
+ * 3 Rank interleaving support.
+ * There should be only three bases enabled and their two masks should
+ * be equal.
+ */
+ for_each_chip_select(base, ctrl, pvt)
+ count += csrow_enabled(base, ctrl, pvt);
+
+ if (count == 3 &&
+ pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
+ edac_dbg(1, "3R interleaving in use.\n");
+ cs_mode |= CS_3R_INTERLEAVE;
+ }
+
return cs_mode;
}
@@ -1891,10 +1907,14 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
- num_zero_bits = msb - weight;
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 2c5975674723..9f82ca295353 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -66,14 +66,12 @@ unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
char *p = buf;
for (i = 0; i < mci->n_layers; i++) {
- n = snprintf(p, len, "%s %d ",
+ n = scnprintf(p, len, "%s %d ",
edac_layer_name[mci->layers[i].type],
dimm->location[i]);
p += n;
len -= n;
count += n;
- if (!len)
- break;
}
return count;
@@ -341,19 +339,16 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
*/
len = sizeof(dimm->label);
p = dimm->label;
- n = snprintf(p, len, "mc#%u", mci->mc_idx);
+ n = scnprintf(p, len, "mc#%u", mci->mc_idx);
p += n;
len -= n;
for (layer = 0; layer < mci->n_layers; layer++) {
- n = snprintf(p, len, "%s#%u",
- edac_layer_name[mci->layers[layer].type],
- pos[layer]);
+ n = scnprintf(p, len, "%s#%u",
+ edac_layer_name[mci->layers[layer].type],
+ pos[layer]);
p += n;
len -= n;
dimm->location[layer] = pos[layer];
-
- if (len <= 0)
- break;
}
/* Link it to the csrows old API data */
@@ -1027,12 +1022,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *other_detail)
{
struct dimm_info *dimm;
- char *p;
+ char *p, *end;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
struct edac_raw_error_desc *e = &mci->error_desc;
bool any_memory = true;
+ const char *prefix;
edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1087,6 +1083,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
*/
p = e->label;
*p = '\0';
+ end = p + sizeof(e->label);
+ prefix = "";
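+ /* Matching DIMM labels are appended below; OTHER_LABEL separates them after the first match. */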
mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
@@ -1114,12 +1112,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = e->label;
*p = '\0';
} else {
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
- }
- strcpy(p, dimm->label);
- p += strlen(p);
+ p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
+ prefix = OTHER_LABEL;
}
/*
@@ -1141,25 +1135,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
if (any_memory)
- strcpy(e->label, "any memory");
+ strscpy(e->label, "any memory", sizeof(e->label));
else if (!*e->label)
- strcpy(e->label, "unknown memory");
+ strscpy(e->label, "unknown memory", sizeof(e->label));
edac_inc_csrow(e, row, chan);
/* Fill the RAM location data */
p = e->location;
+ end = p + sizeof(e->location);
+ prefix = "";
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
continue;
- p += sprintf(p, "%s:%d ",
- edac_layer_name[mci->layers[i].type],
- pos[i]);
+ p += scnprintf(p, end - p, "%s%s:%d", prefix,
+ edac_layer_name[mci->layers[i].type], pos[i]);
+ prefix = " ";
}
- if (p > e->location)
- *(p - 1) = '\0';
edac_raw_mc_handle_error(e);
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 2f9f1e74bb35..0a638c97702a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -744,7 +744,7 @@ static ssize_t mci_ue_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_mc);
+ return sprintf(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
@@ -753,7 +753,7 @@ static ssize_t mci_ce_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_mc);
+ return sprintf(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
@@ -762,7 +762,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_noinfo_count);
+ return sprintf(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
@@ -771,7 +771,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_noinfo_count);
+ return sprintf(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4c626fcd4dcb..1522d4aa2ca6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1052,7 +1052,7 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
rc = ((reg << 6) | rc) << 26;
- return rc | 0x1ffffff;
+ return rc | 0x3ffffff;
}
static u64 knl_get_tolm(struct sbridge_pvt *pvt)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 169f96e51c29..6971ded598de 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -245,11 +245,8 @@ static int ti_edac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg)) {
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF controller regs not defined\n");
+ if (IS_ERR(reg))
return PTR_ERR(reg);
- }
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = 1;
@@ -281,8 +278,6 @@ static int ti_edac_probe(struct platform_device *pdev)
error_irq = platform_get_irq(pdev, 0);
if (error_irq < 0) {
ret = error_irq;
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF irq number not defined.\n");
goto err;
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index fb6c651214f3..9f89c17730b1 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
@@ -953,11 +954,25 @@ static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
return DMA_FROM_DEVICE;
}
+static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
+ fw_iso_mc_callback_t callback,
+ void *callback_data)
+{
+ struct fw_iso_context *ctx;
+
+ ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
+ 0, 0, 0, NULL, callback_data);
+ if (!IS_ERR(ctx))
+ ctx->callback.mc = callback;
+
+ return ctx;
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
- fw_iso_callback_t cb;
+ union fw_iso_callback cb;
int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
@@ -970,7 +985,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE:
@@ -978,19 +993,24 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- cb = (fw_iso_callback_t)iso_mc_callback;
+ cb.mc = iso_mc_callback;
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed, a->header_size, cb, client);
+ if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+ context = fw_iso_mc_context_create(client->device->card, cb.mc,
+ client);
+ else
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed,
+ a->header_size, cb.sc, client);
if (IS_ERR(context))
return PTR_ERR(context);
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 847f33ffc4ae..ae79c3300129 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);
static int __init setup_noefi(char *arg)
{
disable_runtime = true;
@@ -97,6 +97,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "runtime"))
+ disable_runtime = false;
+
if (parse_option_str(str, "nosoftreserve"))
set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 9a369a2eda71..116eb465cdb4 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -155,7 +155,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return -ENOMEM;
- cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
GFP_KERNEL);
if (!cpu_groups) {
free_cpumask_var(tmp);
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 177d03ef4529..40a052bc6784 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -256,6 +256,11 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
NULL,
0);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index fa9b4d8c3ff5..43ca52fa6f9a 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
}
chip->gc.label = dev_name(dev);
- if (of_property_read_u32(dn, "ngpios", &num_gpios))
+ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
chip->gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f18240f87387..7741195eb85e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
+#include <linux/cc_platform.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -1269,7 +1270,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
* however, SME requires an indirect IOMMU mapping because the encryption
* bit is beyond the DMA mask of the chip.
*/
- if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
dev_info(&pdev->dev,
"SME is not compatible with RAVEN\n");
return -ENOTSUPP;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ff80786e3918..01efda4398e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -1257,7 +1257,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
if (adev->pdev->device == 0x1681)
- adev->external_rev_id = adev->rev_id + 0x19;
+ adev->external_rev_id = 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
break;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..8080bba5b7a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -263,7 +263,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -487,7 +487,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -639,7 +639,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -914,7 +914,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
&param, buf,
max_param_num,
&param_nums)) {
@@ -1211,7 +1211,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1396,7 +1396,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1581,7 +1581,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1766,7 +1766,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1944,7 +1944,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -2382,7 +2382,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4a4894e9d9c9..377c4e53a2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -366,32 +366,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -518,14 +518,21 @@ static unsigned int find_clk_for_voltage(
unsigned int voltage)
{
int i;
+ int max_voltage = 0;
+ int clock = 0;
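+ /* Track the highest voltage below the requested one as a fallback when there is no exact match. */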
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage)
+ if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
+ } else if (clock_table->SocVoltage[i] >= max_voltage &&
+ clock_table->SocVoltage[i] < voltage) {
+ max_voltage = clock_table->SocVoltage[i];
+ clock = clocks[i];
+ }
}
- ASSERT(0);
- return 0;
+ ASSERT(clock);
+ return clock;
}
void dcn31_clk_mgr_helper_populate_bw_params(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 3f2333ec67e2..3afa1159a5f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -76,10 +76,6 @@ void dcn31_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
REG_WRITE(REFCLK_CNTL, 0);
@@ -106,6 +102,9 @@ void dcn31_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
}
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (dc->debug.enable_mem_low_power.bits.dmcu) {
// Force ERAM to shutdown if DMCU is not enabled
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 0006bbac466c..79e92ecca96c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -217,8 +217,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 402.0,
- .sr_enter_plus_exit_z8_time_us = 520.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 3840,/*upto 4K*/
+ .max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -1590,6 +1590,13 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ce55c9caf9a2..d58925cff420 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -5398,9 +5398,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
- + dml_max4(
- v->VActivePixelBandwidth[i][j][k],
- v->VActiveCursorBandwidth[i][j][k]
+ + dml_max3(
+ v->VActivePixelBandwidth[i][j][k]
+ + v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5adc471bef57..3d2f0817e40a 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -227,7 +227,7 @@ enum {
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x1A
+#define YELLOW_CARP_B0 0x20
#define YELLOW_CARP_UNKNOWN 0xFF
#ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e9bd84ec027d..be61975f1470 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = remove_display_from_topology_v2(hdcp, index);
@@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
@@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3(
dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = add_display_to_topology_v2(hdcp, display);
@@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3(
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6bfaefa01818..1e30eaeb0e1b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static enum drm_connector_status ast_connector_detect(struct drm_connector
- *connector, bool force)
-{
- int r;
-
- r = ast_get_modes(connector);
- if (r <= 0)
- return connector_status_disconnected;
-
- return connector_status_connected;
-}
-
static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_attach_encoder(connector, encoder);
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 30cc59fe6ef7..f19d9acbe959 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -31,7 +31,7 @@
#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
* Enforce dma_alloc_coherent when memory encryption is active as well
* for the same reasons as for Xen paravirtual hosts.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return true;
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index fcfe1a03c4a1..bf8a6e823a15 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -248,7 +248,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
- if (!ww_mutex_trylock(&lock->mutex))
+ if (!ww_mutex_trylock(&lock->mutex, NULL))
return -EBUSY;
else
return 0;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index f6bdec7fa925..e1b2ce4921ae 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO 2021 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -185,6 +191,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 3 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index abe3d61b6243..5cf152be4487 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1916,6 +1916,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (!crtc_state)
+ return;
+
/*
* Don't clobber DPCD if it's been already read out during output
* setup (eDP) or detect.
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 1257f4f11e66..438bbc7b8147 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
timeline->hwsp_map = vaddr;
timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
- clflush(vaddr + ofs);
+ drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
return 0;
}
@@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl)
memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
WRITE_ONCE(*hwsp_seqno, tl->seqno);
- clflush(hwsp_seqno);
+ drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}
void intel_timeline_enter(struct intel_timeline *tl)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4037030f0984..9023d4ecf3b3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -11048,12 +11048,6 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
-#define BXT_REQ_DATA_MASK 0x3F
-#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
-#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
-#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333
-
#define BXT_D_CR_DRP0_DUNIT8 0x1000
#define BXT_D_CR_DRP0_DUNIT9 0x1200
#define BXT_D_CR_DRP0_DUNIT_START 8
@@ -11084,9 +11078,7 @@ enum skl_power_gate {
#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
-#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define SKL_REQ_DATA_MASK (0xF << 0)
#define DG1_GEAR_TYPE REG_BIT(16)
#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 806ad688274b..63fec1c3c132 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u64, ctx)
- __field(u32, guc_id)
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
@@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
- __entry->guc_id = rq->context->guc_id;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->tail = rq->tail;
),
- TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->guc_id, __entry->ctx, __entry->seqno,
- __entry->tail)
+ __entry->ctx, __entry->seqno, __entry->tail)
);
DEFINE_EVENT(i915_request, i915_request_add,
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5259edacde38..066a9118c374 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/sched/clock.h>
struct drm_i915_private;
struct timer_list;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 91866520c173..7acce64b0941 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -244,7 +244,6 @@ static int
skl_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 mem_freq_khz, val;
int ret;
dram_info->type = skl_get_dram_type(i915);
@@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
if (ret)
return ret;
- val = intel_uncore_read(&i915->uncore,
- SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- if (dram_info->num_channels * mem_freq_khz == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels, valid_ranks = 0;
+ u32 val;
+ u8 valid_ranks = 0;
int i;
- val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- if (mem_freq_khz * num_active_channels == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
index 44327bc629ca..06613ffeaaf8 100644
--- a/drivers/gpu/drm/kmb/kmb_crtc.c
+++ b/drivers/gpu/drm/kmb/kmb_crtc.c
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
.disable_vblank = kmb_crtc_disable_vblank,
};
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
unsigned int val = 0;
/* Initialize mipi */
- kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+ kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
- kmb_crtc_set_mode(crtc);
+ kmb_crtc_set_mode(crtc, state);
drm_crtc_vblank_on(crtc);
}
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
+static enum drm_mode_status
+ kmb_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int refresh;
+ struct drm_device *dev = crtc->dev;
+ int vfp = mode->vsync_start - mode->vdisplay;
+
+ if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+ drm_dbg(dev, "height = %d less than %d",
+ mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+ return MODE_BAD_VVALUE;
+ }
+ if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+ drm_dbg(dev, "width = %d less than %d",
+ mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+ return MODE_BAD_HVALUE;
+ }
+ refresh = drm_mode_vrefresh(mode);
+ if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+ drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+ refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+ return MODE_BAD;
+ }
+
+ if (vfp < KMB_CRTC_MIN_VFP) {
+ drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
.atomic_begin = kmb_crtc_atomic_begin,
.atomic_enable = kmb_crtc_atomic_enable,
.atomic_disable = kmb_crtc_atomic_disable,
.atomic_flush = kmb_crtc_atomic_flush,
+ .mode_valid = kmb_crtc_mode_valid,
};
int kmb_setup_crtc(struct drm_device *drm)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 12ce669650cc..961ac6fb5fcf 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
if (val & LAYER3_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
- if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+ if (val & LAYER3_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index 69a62e2d03ff..bf085e95b28f 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -20,11 +20,18 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP 4
+#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH 1920
+#define KMB_CRTC_MIN_HEIGHT 1080
#define KMB_FB_MAX_WIDTH 1920
#define KMB_FB_MAX_HEIGHT 1080
#define KMB_FB_MIN_WIDTH 1
#define KMB_FB_MIN_HEIGHT 1
-
+#define KMB_MIN_VREFRESH 59 /* vertical refresh in Hz */
+#define KMB_MAX_VREFRESH 60 /* vertical refresh in Hz */
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
@@ -50,6 +57,7 @@ struct kmb_drm_private {
spinlock_t irq_lock;
int irq_lcd;
int sys_clk_mhz;
+ struct disp_cfg init_disp_cfg[KMB_MAX_PLANES];
struct layer_status plane_status[KMB_MAX_PLANES];
int kmb_under_flow;
int kmb_flush_done;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index 1793cd31b117..f6071882054c 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
return 0;
}
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500 500
+
static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
struct mipi_tx_frame_timing_cfg *fg_cfg)
{
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
/* 500 MHz system clock minus 50 to account for the difference in
* MIPI clock speed in RTL tests
*/
- sysclk = kmb_dsi->sys_clk_mhz - 50;
+ if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+ } else {
+ /* 700 MHz clock */
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+ }
/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
* Frame generator timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
return 0;
}
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+ struct drm_atomic_state *old_state)
{
struct regmap *msscam;
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
return;
}
-
+ drm_atomic_bridge_chain_enable(adv_bridge, old_state);
/* DISABLE MIPI->CIF CONNECTION */
regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
}
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz)
+ int sys_clk_mhz, struct drm_atomic_state *old_state)
{
u64 data_rate;
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
mipi_tx_init_cfg.lane_rate_mbps = data_rate;
}
- kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
/* Initialize mipi controller */
mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
/* Dphy initialization */
mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
- connect_lcd_to_mipi(kmb_dsi);
+ connect_lcd_to_mipi(kmb_dsi, old_state);
dev_info(kmb_dsi->dev, "mipi hw initialized");
return 0;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h
index 66b7c500d9bc..09dc88743d77 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.h
+++ b/drivers/gpu/drm/kmb/kmb_dsi.h
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz);
+ int sys_clk_mhz, struct drm_atomic_state *old_state);
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 06b0c42c9e91..00404ba4126d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int i;
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
+ /* Due to HW limitations, changing pixel format after initial
+ * plane configuration is not supported.
+ */
+ if (init_disp_cfg.format && init_disp_cfg.format != format) {
+ drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+ return -EINVAL;
+ }
for (i = 0; i < plane->format_count; i++) {
if (plane->format_types[i] == format)
return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
fb = new_plane_state->fb;
if (!fb || !new_plane_state->crtc)
return 0;
@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
+
+ /* Due to HW limitations, changing plane height or width after
+ * initial plane configuration is not supported.
+ */
+ if ((init_disp_cfg.width && init_disp_cfg.height) &&
+ (init_disp_cfg.width != fb->width ||
+ init_disp_cfg.height != fb->height)) {
+ drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+ return -EINVAL;
+ }
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
drm_atomic_get_existing_crtc_state(state,
@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
unsigned char plane_id;
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
+ struct disp_cfg *init_disp_cfg;
if (!plane || !new_plane_state || !old_plane_state)
return;
@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
}
spin_unlock_irq(&kmb->irq_lock);
- src_w = (new_plane_state->src_w >> 16);
+ init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+ src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
crtc_x = new_plane_state->crtc_x;
crtc_y = new_plane_state->crtc_y;
@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
/* Enable DMA */
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+ /* Save initial display config */
+ if (!init_disp_cfg->width ||
+ !init_disp_cfg->height ||
+ !init_disp_cfg->format) {
+ init_disp_cfg->width = width;
+ init_disp_cfg->height = height;
+ init_disp_cfg->format = fb->format->format;
+ }
+
drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
index 6e8d22cf8819..b51144044fe8 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.h
+++ b/drivers/gpu/drm/kmb/kmb_plane.h
@@ -63,6 +63,12 @@ struct layer_status {
u32 ctrl;
};
+struct disp_cfg {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+};
+
struct kmb_plane *kmb_plane_init(struct drm_device *drm);
void kmb_plane_destroy(struct drm_plane *plane);
#endif /* __KMB_PLANE_H__ */
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 4fd4de16cd32..894472921c30 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -38,16 +38,18 @@
typedef struct drm32_mga_init {
int func;
u32 sarea_priv_offset;
- int chipset;
- int sgram;
- unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ struct_group(always32bit,
+ int chipset;
+ int sgram;
+ unsigned int maccess;
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ );
u32 fb_offset;
u32 mmio_offset;
u32 status_offset;
@@ -67,9 +69,8 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
init.func = init32.func;
init.sarea_priv_offset = init32.sarea_priv_offset;
- memcpy(&init.chipset, &init32.chipset,
- offsetof(drm_mga_init_t, fb_offset) -
- offsetof(drm_mga_init_t, chipset));
+ memcpy(&init.always32bit, &init32.always32bit,
+ sizeof(init32.always32bit));
init.fb_offset = init32.fb_offset;
init.mmio_offset = init32.mmio_offset;
init.status_offset = init32.status_offset;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 33da25b81615..267a880811d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 030f82f149c2..ee25d556c8a1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -203,6 +203,10 @@ struct msm_gpu {
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 84e98c07c900..20006d060b5b 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -200,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
idle_freq = get_freq(gpu);
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ if (gpu->clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index ec0432fe1bdf..86d78634a979 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
mxsfb_enable_axi_clk(mxsfb);
- mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
mxsfb_disable_axi_clk(mxsfb);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b0c3422cb01f..1a896a24288a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -992,7 +992,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
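A minimal sketch of the kvcalloc() change above, using a hypothetical record type; the two-argument count/size form lets the allocator detect multiplication overflow and return NULL rather than handing back a short buffer:

#include <linux/mm.h>
#include <linux/types.h>

struct fault_rec {			/* hypothetical element type */
	u64 addr;
	u32 flags;
};

static struct fault_rec *alloc_fault_table(size_t entries)
{
	/* entries * sizeof(element) is overflow-checked inside kvcalloc(). */
	return kvcalloc(entries, sizeof(struct fault_rec), GFP_KERNEL);
}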
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 0145129d7c66..534dd7414d42 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,
.hdisplay = 800,
- .hsync_start = 800 + 6,
- .hsync_end = 800 + 6 + 15,
- .htotal = 800 + 6 + 15 + 16,
+ .hsync_start = 800 + 52,
+ .hsync_end = 800 + 52 + 8,
+ .htotal = 800 + 52 + 8 + 48,
.vdisplay = 1280,
- .vsync_start = 1280 + 8,
- .vsync_end = 1280 + 8 + 48,
- .vtotal = 1280 + 8 + 48 + 52,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 6,
+ .vtotal = 1280 + 16 + 6 + 15,
.width_mm = 135,
.height_mm = 217,
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
index 1c19a5d3eefb..8d8d8e214c28 100644
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state)
mock_device.driver = &mock_driver;
mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
mock_plane.dev = &mock_device;
+ mock_obj_props.count = 0;
mock_plane.base.properties = &mock_obj_props;
mock_prop.base.id = 1; /* 0 is an invalid id */
mock_prop.dev = &mock_device;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1c5ffe2935af..abf2d7a4fdf1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -190,6 +190,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
+ dma_resv_fini(&fbo->base.base._resv);
ttm_bo_put(fbo->bo);
kfree(fbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ab9a1750e1df..bfd71c86faa5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,7 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e50fb82a3030..2aceac7856e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_high;
u32 channel_id = (channel->channel_id << 16);
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long si, di, eax, ebx, ecx, edx;
/* HB port can't access encrypted memory */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_low;
u32 channel_id = (channel->channel_id << 16);
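A minimal sketch of the cc_platform_has() replacement pattern applied in the two vmwgfx files above (the helper name is illustrative):

#include <linux/cc_platform.h>

static bool example_can_use_plain_memory(void)	/* illustrative helper */
{
	/* Generic query covering SME/SEV and other confidential-computing
	 * schemes, replacing the old mem_encrypt_active() test.
	 */
	return !cc_platform_has(CC_ATTR_MEM_ENCRYPT);
}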
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 8ae301eef643..a9639d098893 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -259,10 +259,24 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ break;
case MEDIA_BUS_FMT_YUYV8_1X16:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
cfg->mipi_dt = MIPI_DT_YUV422;
- cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -332,7 +346,7 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *mbus_fmt)
{
- int ret;
+ int ret, is_bt1120;
memset(csicfg, 0, sizeof(*csicfg));
@@ -353,11 +367,18 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
+ /* UYVY10_1X20 etc. should be supported as well */
+ is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16;
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
mbus_fmt->field == V4L2_FIELD_ALTERNATE)
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
break;
case V4L2_MBUS_CSI2_DPHY:
/*
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 477baa30889c..ece147d1a278 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -129,10 +129,12 @@ struct cp2112_xfer_status_report {
struct cp2112_string_report {
u8 dummy; /* force .string to be aligned */
- u8 report; /* CP2112_*_STRING */
- u8 length; /* length in bytes of everyting after .report */
- u8 type; /* USB_DT_STRING */
- wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ struct_group_attr(contents, __packed,
+ u8 report; /* CP2112_*_STRING */
+ u8 length; /* length in bytes of everything after .report */
+ u8 type; /* USB_DT_STRING */
+ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ );
} __packed;
/* Number of times to request transfer status before giving up waiting for a
@@ -986,8 +988,8 @@ static ssize_t pstr_show(struct device *kdev,
u8 length;
int ret;
- ret = cp2112_hid_get(hdev, attr->report, &report.report,
- sizeof(report) - 1, HID_FEATURE_REPORT);
+ ret = cp2112_hid_get(hdev, attr->report, (u8 *)&report.contents,
+ sizeof(report.contents), HID_FEATURE_REPORT);
if (ret < 3) {
hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name,
ret);
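A minimal sketch of struct_group_attr() as used in the cp2112 hunk above, with hypothetical names; the extra attribute argument keeps the grouped view __packed, so sizeof() of the group matches the wire layout read from the device:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_report {			/* hypothetical report layout */
	u8 pad;
	struct_group_attr(payload, __packed,
		u8 id;
		u8 len;
		u8 data[30];
	);
} __packed;

/* sizeof(r->payload) == 32, independent of the leading pad byte. */
static size_t example_payload_size(const struct example_report *r)
{
	return sizeof(r->payload);
}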
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 1ca64481145e..ea17abc7ad52 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -857,7 +857,7 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
memcpy(&kone->last_mouse_event, event,
sizeof(struct kone_mouse_event));
else
- memset(&event->tilt, 0, 5);
+ memset(&event->wipe, 0, sizeof(event->wipe));
kone_keep_values_up_to_date(kone, event);
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 4a1a9cb76b08..65c800e3addc 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -152,11 +152,13 @@ struct kone_mouse_event {
uint16_t x;
uint16_t y;
uint8_t wheel; /* up = 1, down = -1 */
- uint8_t tilt; /* right = 1, left = -1 */
- uint8_t unknown;
- uint8_t event;
- uint8_t value; /* press = 0, release = 1 */
- uint8_t macro_key; /* 0 to 8 */
+ struct_group(wipe,
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+ );
} __attribute__ ((__packed__));
enum kone_mouse_events {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..d030577ad6a2 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -13,6 +13,7 @@
#define _HYPERV_VMBUS_H
#include <linux/list.h>
+#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 382ef0395d8e..30aae8642069 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -93,6 +93,7 @@ struct slimpro_resp_msg {
struct xgene_hwmon_dev {
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
@@ -652,7 +653,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version;
@@ -671,26 +672,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
rc = -ENODEV;
goto out_mbox_free;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENODEV;
- goto out;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -702,16 +693,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
ctx->pcc_comm_addr = (void __force *)ioremap(
ctx->comm_base_addr,
- cppc_ss->length);
+ pcc_chan->shmem_size);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -727,11 +718,11 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * cppc_ss->latency is just a Nominal value. In reality
+ * pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- ctx->usecs_lat = PCC_NUM_RETRIES * cppc_ss->latency;
+ ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
}
ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
@@ -757,7 +748,7 @@ out:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
out_mbox_free:
kfifo_free(&ctx->async_msg_fifo);
@@ -773,7 +764,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
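A minimal sketch of the new PCC request flow used above, with a hypothetical helper; the returned struct pcc_mbox_chan now carries the shared-memory window and nominal latency directly, instead of being dug out of con_priv:

#include <acpi/pcc.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>

static int example_request_pcc(struct mbox_client *cl, int subspace_id,
			       void __iomem **shmem, u32 *nominal_latency)
{
	struct pcc_mbox_chan *pchan;

	pchan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* Shared communication region described by the PCCT subspace. */
	*shmem = ioremap(pchan->shmem_base_addr, pchan->shmem_size);
	if (!*shmem) {
		pcc_mbox_free_channel(pchan);
		return -ENOMEM;
	}

	*nominal_latency = pchan->latency;
	return 0;
}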
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index bba08cbce6e1..1a19ebad60ad 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -103,6 +103,7 @@ struct slimpro_i2c_dev {
struct i2c_adapter adapter;
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
struct completion rd_complete;
@@ -466,7 +467,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
return PTR_ERR(ctx->mbox_chan);
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version = XGENE_SLIMPRO_I2C_V1;
@@ -483,24 +484,14 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
+ return PTR_ERR(ctx->pcc_chan);
+			return PTR_ERR(pcc_chan);
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENOENT;
- goto mbox_err;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -512,17 +503,17 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_SLIMPRO_I2C_V2)
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WT);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -561,7 +552,7 @@ mbox_err:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return rc;
}
@@ -575,7 +566,7 @@ static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a20b8108e160..c00f8e28aab7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
- LS_DEVICE_NAME_MAX);
+ strscpy_pad(header->device_name,
+ dev_name(&query->port->agent->device->dev),
+ LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
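A minimal sketch of the strscpy_pad() substitution above (buffer size chosen for illustration); unlike a fixed-length memcpy() from a shorter source, it guarantees NUL termination and zero-fills the remainder of the destination:

#include <linux/string.h>

#define EXAMPLE_NAME_MAX 64		/* illustrative size */

static void example_set_name(char dst[EXAMPLE_NAME_MAX], const char *src)
{
	strscpy_pad(dst, src, EXAMPLE_NAME_MAX);
}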
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 489b436f19bb..3d42bd2b36bd 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
{
u64 reg;
struct pio_buf *pbuf;
+ LIST_HEAD(wake_list);
if (!sc)
return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
- while (!list_empty(&sc->piowait)) {
+ if (!list_empty(&sc->piowait))
+ list_move(&sc->piowait, &wake_list);
+ write_sequnlock(&sc->waitlock);
+ while (!list_empty(&wake_list)) {
struct iowait *wait;
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
- wait = list_first_entry(&sc->piowait, struct iowait, list);
+ wait = list_first_entry(&wake_list, struct iowait, list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
- write_sequnlock(&sc->waitlock);
spin_unlock_irq(&sc->alloc_lock);
}
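A minimal sketch of the lock-ordering pattern applied in sc_disable() above, with hypothetical types; waiters are spliced to a private list under the lock and woken only after it is released, so the wakeup callbacks can take other locks without inverting the order:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_waiter {			/* hypothetical waiter */
	struct list_head node;
};

static void example_drain(spinlock_t *lock, struct list_head *waitlist,
			  void (*wake)(struct example_waiter *w))
{
	LIST_HEAD(local);
	struct example_waiter *w, *tmp;

	spin_lock(lock);
	list_splice_init(waitlist, &local);	/* detach under the lock */
	spin_unlock(lock);

	list_for_each_entry_safe(w, tmp, &local, node) {
		list_del_init(&w->node);
		wake(w);			/* lock no longer held */
	}
}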
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 5fb92de1f015..9b544a3b1288 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *)((u8 *)cqe + 32);
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 7110ebf834f9..102dc9342f2a 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
}
if (cq_poll_info->ud_vlan_valid) {
- entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
- entry->wc_flags |= IB_WC_WITH_VLAN;
+ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ if (vlan) {
+ entry->vlan_id = vlan;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ }
} else {
entry->sl = 0;
}
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
index b68c575eb78e..b0d6ee0739f5 100644
--- a/drivers/infiniband/hw/irdma/ws.c
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
- if (ret)
+ if (ret) {
+ vsi->unregister_qset(vsi, tc_node);
goto reg_err;
+ }
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
}
goto exit;
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
-
-reg_err:
- mutex_unlock(&vsi->dev->ws_mutex);
- irdma_ws_remove(vsi, user_pri);
- return ret;
}
/**
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3be36ebbf67a..22e2f4d79743 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
set_mr_fields(dev, mr, umem->length, access_flags);
kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
}
+ xa_init(&mr->implicit_children);
odp->private = mr;
err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b2fca110346c..e5abbcfc1d57 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 3cb4febaad0f..8def88cfa300 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -455,6 +455,7 @@ struct qedr_qp {
/* synchronization objects used with iwarp ep */
struct kref refcnt;
struct completion iwarp_cm_comp;
+ struct completion qp_rel_comp;
unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 1715fbe0719d..a51fc6854984 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
{
struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
- kfree(qp);
+ complete(&qp->qp_rel_comp);
}
static void
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 3fbf172dbbef..dcb3653db72d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
+ init_completion(&qp->qp_rel_comp);
}
qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_free_qp_resources(dev, qp, udata);
- if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qedr_iw_qp_rem_ref(&qp->ibqp);
+ wait_for_completion(&qp->qp_rel_comp);
+ }
return 0;
}
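A minimal sketch of the kref-plus-completion teardown used by qedr above, on a hypothetical object; the release callback only signals, and the destroy path waits after dropping its own reference, so the memory is freed exactly once, after every outstanding user is gone:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {			/* hypothetical refcounted object */
	struct kref refcnt;
	struct completion released;
};

static struct example_obj *example_create(void)
{
	struct example_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o) {
		kref_init(&o->refcnt);
		init_completion(&o->released);
	}
	return o;
}

static void example_release(struct kref *ref)
{
	struct example_obj *o = container_of(ref, struct example_obj, refcnt);

	complete(&o->released);		/* last reference dropped */
}

static void example_destroy(struct example_obj *o)
{
	kref_put(&o->refcnt, example_release);
	wait_for_completion(&o->released);
	kfree(o);
}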
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index a67599b5a550..ac11943a5ddb 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -602,7 +602,7 @@ done:
/*
* How many pages in this iovec element?
*/
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
const unsigned long addr = (unsigned long) iov->iov_base;
const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, int npages)
+ unsigned long addr, int tlen, size_t npages)
{
struct page *pages[8];
int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
unsigned long idx;
for (idx = 0; idx < niov; idx++) {
- const int npages = qib_user_sdma_num_pages(iov + idx);
+ const size_t npages = qib_user_sdma_num_pages(iov + idx);
const unsigned long addr = (unsigned long) iov[idx].iov_base;
ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
unsigned pktnw;
unsigned pktnwc;
int nfrags = 0;
- int npages = 0;
- int bytes_togo = 0;
+ size_t npages = 0;
+ size_t bytes_togo = 0;
int tiddma = 0;
int cfur;
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
npages += qib_user_sdma_num_pages(&iov[idx]);
- bytes_togo += slen;
+ if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+ bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
pktnwc += slen >> 2;
idx++;
nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
}
if (frag_size) {
- int tidsmsize, n;
- size_t pktsize;
+ size_t tidsmsize, n, pktsize, sz, addrlimit;
n = npages*((2*PAGE_SIZE/frag_size)+1);
pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
else
tidsmsize = 0;
- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+ if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt = kmalloc(sz, GFP_KERNEL);
if (!pkt) {
ret = -ENOMEM;
goto free_pbc;
}
pkt->largepkt = 1;
pkt->frag_size = frag_size;
- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+ if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+ &addrlimit) ||
+ addrlimit > type_max(typeof(pkt->addrlimit))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt->addrlimit = addrlimit;
if (tiddma) {
char *tidsm = (char *)pkt + pktsize;
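A minimal sketch of the overflow checks added above (the field width is chosen for illustration); check_add_overflow() catches wrap-around in the size arithmetic, and type_max() catches truncation when the sum is later stored in a narrower struct field:

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int example_add_len(size_t a, size_t b, u16 *out)
{
	size_t sum;

	if (check_add_overflow(a, b, &sum) || sum > type_max(u16))
		return -EINVAL;

	*out = sum;			/* known to fit in u16 */
	return 0;
}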
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 49bdd78ac664..3305f2744bfa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
- ret = ENOMEM;
+ ret = -ENOMEM;
goto bail_ip;
}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 2a822b229bd0..1eacd43cb436 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -20,7 +20,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -121,8 +121,10 @@ struct ivhd_entry {
u8 type;
u16 devid;
u8 flags;
- u32 ext;
- u32 hidh;
+ struct_group(ext_hid,
+ u32 ext;
+ u32 hidh;
+ );
u64 cid;
u8 uidf;
u8 uidl;
@@ -964,7 +966,7 @@ static bool copy_device_table(void)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (sme_active() && is_kdump_kernel())
+ old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
dev_table_size)
: memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -1377,7 +1379,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+ BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
+ memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
hid[ACPIHID_HID_LEN - 1] = '\0';
if (!(*hid)) {
@@ -3032,7 +3035,8 @@ static int __init amd_iommu_init(void)
static bool amd_iommu_sme_check(void)
{
- if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+ if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+ (boot_cpu_data.x86 != 0x17))
return true;
/* For Fam17h, a specific level of support is required */
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1722bb161841..9e5da037d949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
* active, because some of those devices (AMD GPUs) don't have the
* encryption bit in their DMA-mask and require remapping.
*/
- if (!mem_encrypt_active() && dev_data->iommu_v2)
+ if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index a9e568276c99..13cbeb997cc1 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -17,6 +17,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/cc_platform.h>
#include "amd_iommu.h"
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
* When memory encryption is active the device is likely not in a
* direct-mapped domain. Forbid using IOMMUv2 functionality for now.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -ENODEV;
if (!amd_iommu_v2_supported())
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3303d707bab4..e80261d17a49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -25,6 +25,7 @@
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
+#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
else
iommu_set_default_translated(false);
- if (iommu_default_passthrough() && mem_encrypt_active()) {
+ if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
iommu_set_default_translated(false);
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index aca7b595c4c7..7038957f4a77 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -115,18 +115,24 @@ config BCM6345_L1_IRQ
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7038_L1_IRQ
- bool
+ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7120_L2_IRQ
- bool
+ tristate "Broadcom STB 7120-style L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config BRCMSTB_L2_IRQ
- bool
+ tristate "Broadcom STB generic L2 interrupt controller driver"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -400,8 +406,9 @@ config IRQ_UNIPHIER_AIDET
Support for the UniPhier AIDET (ARM Interrupt Detector).
config MESON_IRQ_GPIO
- bool "Meson GPIO Interrupt Multiplexer"
- depends on ARCH_MESON
+ tristate "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
select IRQ_DOMAIN_HIERARCHY
help
Support Meson SoC Family GPIO Interrupt Multiplexer
@@ -602,4 +609,12 @@ config APPLE_AIC
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
+config MCHP_EIC
+ bool "Microchip External Interrupt Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for Microchip External Interrupt Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index f88cbf36a9d2..c1f611cbfbf8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -116,3 +116,4 @@ obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
+obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 6fc145aacaf0..3759dc36cc8f 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -245,7 +245,7 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
irq = FIELD_GET(AIC_EVENT_NUM, event);
if (type == AIC_EVENT_TYPE_HW)
- handle_domain_irq(aic_irqc->hw_domain, irq, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain, irq);
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
aic_handle_ipi(regs);
else if (event != 0)
@@ -392,25 +392,25 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
}
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
if (is_kernel_in_hyp_mode()) {
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
}
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
@@ -674,7 +674,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
for_each_set_bit(i, &firing, AIC_NR_SWIPI)
- handle_domain_irq(aic_irqc->ipi_domain, i, regs);
+ generic_handle_domain_irq(aic_irqc->ipi_domain, i);
/*
* No ordering needed here; at worst this just changes the timing of
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 53e0fb0562c1..80906bfec845 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -589,12 +589,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
irq = msinr - PCI_MSI_DOORBELL_START;
- if (is_chained)
- generic_handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq);
- else
- handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq, regs);
+ generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
}
}
#else
@@ -646,8 +641,8 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
break;
if (irqnr > 1) {
- handle_domain_irq(armada_370_xp_mpic_domain,
- irqnr, regs);
+ generic_handle_domain_irq(armada_370_xp_mpic_domain,
+ irqnr);
continue;
}
@@ -666,7 +661,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
& IPI_DOORBELL_MASK;
for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
- handle_domain_irq(ipi_domain, ipi, regs);
+ generic_handle_domain_irq(ipi_domain, ipi);
}
#endif
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 58717cd44f99..62ccf2c0c414 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -100,7 +100,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
if (stat == 0)
break;
irq += ffs(stat) - 1;
- handle_domain_irq(vic->dom, irq, regs);
+ generic_handle_domain_irq(vic->dom, irq);
}
}
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
index 476d6024aaf2..223dd2f97d28 100644
--- a/drivers/irqchip/irq-ativic32.c
+++ b/drivers/irqchip/irq-ativic32.c
@@ -5,11 +5,14 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <nds32_intrinsic.h>
+#include <asm/irq_regs.h>
+
unsigned long wake_mask;
static void ativic32_ack_irq(struct irq_data *data)
@@ -103,10 +106,25 @@ static irq_hw_number_t get_intr_src(void)
- NDS32_VECTOR_offINTERRUPT;
}
-asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+static void ativic32_handle_irq(struct pt_regs *regs)
{
irq_hw_number_t hwirq = get_intr_src();
- handle_domain_irq(root_domain, hwirq, regs);
+ generic_handle_domain_irq(root_domain, hwirq);
+}
+
+/*
+ * TODO: convert nds32 to GENERIC_IRQ_MULTI_HANDLER so that this entry logic
+ * can live in arch code.
+ */
+asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ ativic32_handle_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit();
}
int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 2c999dc310c1..4631f6847953 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -71,7 +71,7 @@ aic_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(gc, 0, AT91_AIC_EOICR);
else
- handle_domain_irq(aic_domain, irqnr, regs);
+ generic_handle_domain_irq(aic_domain, irqnr);
}
static int aic_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index fb4ad2aaa727..145535bd7560 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -80,7 +80,7 @@ aic5_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
else
- handle_domain_irq(aic5_domain, irqnr, regs);
+ generic_handle_domain_irq(aic5_domain, irqnr);
}
static void aic5_mask(struct irq_data *d)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index adc1556ed332..e94e2882286c 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -246,7 +246,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 501facdb4570..51491c3c6fdd 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -143,7 +143,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
if (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
}
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index e3483789f4df..fd079215c17f 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -132,16 +132,12 @@ static void bcm6345_l1_irq_handle(struct irq_desc *desc)
int base = idx * IRQS_PER_WORD;
unsigned long pending;
irq_hw_number_t hwirq;
- unsigned int irq;
pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- irq = irq_linear_revmap(intc->domain, base + hwirq);
- if (irq)
- do_IRQ(irq);
- else
+ if (generic_handle_domain_irq(intc->domain, base + hwirq))
spurious_interrupt();
}
}
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index a035c385ca7a..a62b96237b82 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -28,9 +28,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
-#ifdef CONFIG_ARM
-#include <asm/smp_plat.h>
-#endif
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -127,7 +124,7 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
cpu = intc->cpus[0];
@@ -194,6 +191,7 @@ static void bcm7038_l1_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&intc->lock, flags);
}
+#if defined(CONFIG_MIPS) && defined(CONFIG_SMP)
static int bcm7038_l1_set_affinity(struct irq_data *d,
const struct cpumask *dest,
bool force)
@@ -220,32 +218,6 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
-
-#ifdef CONFIG_SMP
-static void bcm7038_l1_cpu_offline(struct irq_data *d)
-{
- struct cpumask *mask = irq_data_get_affinity_mask(d);
- int cpu = smp_processor_id();
- cpumask_t new_affinity;
-
- /* This CPU was not on the affinity mask */
- if (!cpumask_test_cpu(cpu, mask))
- return;
-
- if (cpumask_weight(mask) > 1) {
- /*
- * Multiple CPU affinity, remove this CPU from the affinity
- * mask
- */
- cpumask_copy(&new_affinity, mask);
- cpumask_clear_cpu(cpu, &new_affinity);
- } else {
- /* Only CPU, put on the lowest online CPU */
- cpumask_clear(&new_affinity);
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
- }
- irq_set_affinity_locked(d, &new_affinity, false);
-}
#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
@@ -328,7 +300,7 @@ static int bcm7038_l1_suspend(void)
u32 val;
/* Wakeup interrupt should only come from the boot cpu */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -352,7 +324,7 @@ static void bcm7038_l1_resume(void)
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -395,9 +367,8 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
.irq_set_affinity = bcm7038_l1_set_affinity,
-#ifdef CONFIG_SMP
- .irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
.irq_set_wake = bcm7038_l1_set_wake,
@@ -416,7 +387,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ irqd_set_single_target(irq_get_irq_data(virq));
return 0;
}
@@ -484,4 +455,8 @@ out_free:
return ret;
}
-IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
+MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index f23d7651ea84..d80e67a6aad2 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -220,6 +220,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct bcm7120_l2_intc_data *data;
+ struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret = 0;
@@ -230,7 +231,13 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (!data)
return -ENOMEM;
- data->num_parent_irqs = of_irq_count(dn);
+ pdev = of_find_device_by_node(dn);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out_free_data;
+ }
+
+ data->num_parent_irqs = platform_irq_count(pdev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
@@ -329,6 +336,7 @@ out_unmap:
if (data->map_base[idx])
iounmap(data->map_base[idx]);
}
+out_free_data:
kfree(data);
return ret;
}
@@ -347,8 +355,9 @@ static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
"BCM3380 L2");
}
-IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
- bcm7120_l2_intc_probe_7120);
-
-IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
- bcm7120_l2_intc_probe_3380);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7120_l2)
+IRQCHIP_MATCH("brcm,bcm7120-l2-intc", bcm7120_l2_intc_probe_7120)
+IRQCHIP_MATCH("brcm,bcm3380-l2-intc", bcm7120_l2_intc_probe_3380)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7120_l2)
+MODULE_DESCRIPTION("Broadcom STB 7120-style L2 interrupt controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 8e0911561f2d..e4efc08ac594 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,16 +275,18 @@ static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
{
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_hif_spi_l2_intc, "brcm,hif-spi-l2-intc",
- brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_upg_aux_aon_l2_intc, "brcm,upg-aux-aon-l2-intc",
- brcmstb_l2_edge_intc_of_init);
static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
}
-IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
- brcmstb_l2_lvl_intc_of_init);
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
+MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index d0da29aeedc8..77ebe7e47e0e 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -77,14 +77,14 @@ static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
readw_relaxed(clps711x_intc->intsr[0]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1);
irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
readw_relaxed(clps711x_intc->intsr[1]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1 + 16, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1 + 16);
} while (irqstat);
}
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index ab91afa86755..d36f536506ba 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -138,7 +138,7 @@ static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
if (hwirq == 0)
return 0;
- handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
+ generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));
return 1;
}
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index a1534edef7fa..cb403c960ac0 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -74,8 +74,8 @@ static void csky_mpintc_handler(struct pt_regs *regs)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
- handle_domain_irq(root_domain,
- readl_relaxed(reg_base + INTCL_RDYIR), regs);
+ generic_handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR));
}
static void csky_mpintc_enable(struct irq_data *d)
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
index 810ccc4fe476..123eb7bfc117 100644
--- a/drivers/irqchip/irq-davinci-aintc.c
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -73,7 +73,7 @@ davinci_aintc_handle_irq(struct pt_regs *regs)
irqnr >>= 2;
irqnr -= 1;
- handle_domain_irq(davinci_aintc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_aintc_irq_domain, irqnr);
}
/* ARM Interrupt Controller Initialization */
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index 276da2772e7f..7482c8ed34b2 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -135,7 +135,7 @@ davinci_cp_intc_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr);
}
static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index fc38d2da11b9..3b0d78aac13b 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(digicolor_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(digicolor_irq_domain, hwirq);
} while (1);
}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index a67266e44491..d5c1c750c8d2 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -42,7 +42,7 @@ static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs)
while (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(d, hwirq, regs);
+ generic_handle_domain_irq(d, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 0bf98425dca5..5cc268880f8e 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -134,7 +134,7 @@ asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *r
while ((status = readl(FT010_IRQ_STATUS(f->base)))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fd4e9a37fea6..daec3309b014 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -660,7 +660,7 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
* PSR.I will be restored when we ERET to the
* interrupted context.
*/
- err = handle_domain_nmi(gic_data.domain, irqnr, regs);
+ err = generic_handle_domain_nmi(gic_data.domain, irqnr);
if (err)
gic_deactivate_unhandled(irqnr);
@@ -728,7 +728,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
else
isb();
- if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_deactivate_unhandled(irqnr);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 5f22c9d65e57..b8bb46c65a97 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -369,7 +369,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
this_cpu_write(sgi_intid, irqstat);
}
- handle_domain_irq(gic->domain, irqnr, regs);
+ generic_handle_domain_irq(gic->domain, irqnr);
} while (1);
}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 058ebaebe2c4..46161f6ff289 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -206,7 +206,7 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (irqnr <= HIP04_MAX_IRQS)
- handle_domain_irq(hip04_data.domain, irqnr, regs);
+ generic_handle_domain_irq(hip04_data.domain, irqnr);
} while (irqnr > HIP04_MAX_IRQS);
}
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 37e0749215c7..fb68f8c59fbb 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -114,7 +114,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i, regs);
+ generic_handle_domain_irq(ixi->domain, i);
/*
* IXP465/IXP435 has an upper IRQ status register
@@ -122,7 +122,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
if (ixi->is_356) {
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i + 32, regs);
+ generic_handle_domain_irq(ixi->domain, i + 32);
}
}
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 5e6f6e25f2ae..a29357f39450 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -126,7 +126,7 @@ static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
while (hwirq) {
irq = __ffs(hwirq);
hwirq &= ~BIT(irq);
- handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+ generic_handle_domain_irq(lpc32xx_mic_irqc->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
new file mode 100644
index 000000000000..c726a19837d2
--- /dev/null
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip External Interrupt Controller driver
+ *
+ * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MCHP_EIC_GFCS (0x0)
+#define MCHP_EIC_SCFG(x) (0x4 + (x) * 0x4)
+#define MCHP_EIC_SCFG_EN BIT(16)
+#define MCHP_EIC_SCFG_LVL BIT(9)
+#define MCHP_EIC_SCFG_POL BIT(8)
+
+#define MCHP_EIC_NIRQ (2)
+
+/*
+ * struct mchp_eic - EIC private data structure
+ * @base: base address
+ * @clk: peripheral clock
+ * @domain: irq domain
+ * @irqs: irqs b/w eic and gic
+ * @scfg: backup for scfg registers (necessary for backup and self-refresh mode)
+ * @wakeup_source: wakeup source mask
+ */
+struct mchp_eic {
+ void __iomem *base;
+ struct clk *clk;
+ struct irq_domain *domain;
+ u32 irqs[MCHP_EIC_NIRQ];
+ u32 scfg[MCHP_EIC_NIRQ];
+ u32 wakeup_source;
+};
+
+static struct mchp_eic *eic;
+
+static void mchp_eic_irq_mask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_mask_parent(d);
+}
+
+static void mchp_eic_irq_unmask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp |= MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_unmask_parent(d);
+}
+
+static int mchp_eic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int parent_irq_type;
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~(MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ tmp |= MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ tmp |= MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ tmp |= MCHP_EIC_SCFG_POL;
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ return irq_chip_set_type_parent(d, parent_irq_type);
+}
+
+static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ irq_set_irq_wake(eic->irqs[d->hwirq], on);
+ if (on)
+ eic->wakeup_source |= BIT(d->hwirq);
+ else
+ eic->wakeup_source &= ~BIT(d->hwirq);
+
+ return 0;
+}
+
+static int mchp_eic_irq_suspend(void)
+{
+ unsigned int hwirq;
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ eic->scfg[hwirq] = readl_relaxed(eic->base +
+ MCHP_EIC_SCFG(hwirq));
+
+ if (!eic->wakeup_source)
+ clk_disable_unprepare(eic->clk);
+
+ return 0;
+}
+
+static void mchp_eic_irq_resume(void)
+{
+ unsigned int hwirq;
+
+ if (!eic->wakeup_source)
+ clk_prepare_enable(eic->clk);
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ writel_relaxed(eic->scfg[hwirq], eic->base +
+ MCHP_EIC_SCFG(hwirq));
+}
+
+static struct syscore_ops mchp_eic_syscore_ops = {
+ .suspend = mchp_eic_irq_suspend,
+ .resume = mchp_eic_irq_resume,
+};
+
+static struct irq_chip mchp_eic_chip = {
+ .name = "eic",
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+ .irq_mask = mchp_eic_irq_mask,
+ .irq_unmask = mchp_eic_irq_unmask,
+ .irq_set_type = mchp_eic_irq_set_type,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = mchp_eic_irq_set_wake,
+};
+
+static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ if (ret || hwirq >= MCHP_EIC_NIRQ)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &mchp_eic_chip, eic);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = eic->irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mchp_eic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = mchp_eic_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *parent_domain = NULL;
+ int ret, i;
+
+ eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+ if (!eic)
+ return -ENOMEM;
+
+ eic->base = of_iomap(node, 0);
+ if (!eic->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ eic->clk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(eic->clk)) {
+ ret = PTR_ERR(eic->clk);
+ goto unmap;
+ }
+
+ ret = clk_prepare_enable(eic->clk);
+ if (ret)
+ goto unmap;
+
+ for (i = 0; i < MCHP_EIC_NIRQ; i++) {
+ struct of_phandle_args irq;
+
+ /* Disable the interrupt, if it was left enabled. */
+ writel_relaxed(0UL, eic->base + MCHP_EIC_SCFG(i));
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret)
+ goto clk_unprepare;
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto clk_unprepare;
+ }
+
+ eic->irqs[i] = irq.args[1];
+ }
+
+ eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ node, &mchp_eic_domain_ops, eic);
+ if (!eic->domain) {
+ pr_err("%pOF: Failed to add domain\n", node);
+ ret = -ENODEV;
+ goto clk_unprepare;
+ }
+
+ register_syscore_ops(&mchp_eic_syscore_ops);
+
+ pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
+
+ return 0;
+
+clk_unprepare:
+ clk_disable_unprepare(eic->clk);
+unmap:
+ iounmap(eic->base);
+free:
+ kfree(eic);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
+
+MODULE_DESCRIPTION("Microchip External Interrupt Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index e50676ce2ec8..d90ff0b92480 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -436,8 +436,7 @@ static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
.translate = meson_gpio_irq_domain_translate,
};
-static int __init meson_gpio_irq_parse_dt(struct device_node *node,
- struct meson_gpio_irq_controller *ctl)
+static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
{
const struct of_device_id *match;
int ret;
@@ -463,8 +462,7 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return 0;
}
-static int __init meson_gpio_irq_of_init(struct device_node *node,
- struct device_node *parent)
+static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain, *parent_domain;
struct meson_gpio_irq_controller *ctl;
@@ -521,5 +519,10 @@ free_ctl:
return ret;
}
-IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
- meson_gpio_irq_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 54c7092cc61d..d02b05a067d9 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -381,24 +381,35 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_unlock_irqrestore(&gic_lock, flags);
}
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
{
- struct gic_all_vpes_chip_data *cd;
- unsigned int intr;
+ static const unsigned int local_intrs[] = {
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_FDC,
+ };
+ unsigned long flags;
+ int i;
- intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- cd = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&gic_lock, flags);
- write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
- if (cd->mask)
- write_gic_vl_smask(BIT(intr));
+ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ unsigned int intr = local_intrs[i];
+ struct gic_all_vpes_chip_data *cd;
+
+ cd = &gic_all_vpes_chip_data[intr];
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+
+ spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
.name = "MIPS GIC Local",
.irq_mask = gic_mask_local_irq_all_vpes,
.irq_unmask = gic_unmask_local_irq_all_vpes,
- .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -477,6 +488,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+ /*
+ * If adding support for more per-cpu interrupts, keep the
+ * array in gic_all_vpes_irq_cpu_online() in sync.
+ */
switch (intr) {
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@@ -663,8 +678,8 @@ static int gic_cpu_startup(unsigned int cpu)
/* Clear all local IRQ masks (ie. disable all local interrupts) */
write_gic_vl_rmask(~0);
- /* Invoke irq_cpu_online callbacks to enable desired interrupts */
- irq_cpu_online();
+ /* Enable desired interrupts */
+ gic_all_vpes_irq_cpu_online();
return 0;
}
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 4a74ac7b7c42..83455ca72439 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -230,7 +230,7 @@ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
@@ -241,7 +241,7 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
/* MMP (ARMv5) */
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 090bc3f4f7d8..3e7297fc5948 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -347,7 +347,6 @@ builtin_platform_driver(mvebu_icu_subset_driver);
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct resource *res;
int i;
icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
@@ -357,8 +356,7 @@ static int mvebu_icu_probe(struct platform_device *pdev)
icu->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- icu->base = devm_ioremap_resource(&pdev->dev, res);
+ icu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index dc1cee4b0fe1..870f9866b8da 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -121,14 +121,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct mvebu_pic *pic;
struct irq_chip *irq_chip;
- struct resource *res;
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
if (!pic)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pic->base = devm_ioremap_resource(&pdev->dev, res);
+ pic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pic->base))
return PTR_ERR(pic->base);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index d1f5740cd575..55cb6b5a686e 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -136,7 +136,7 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
irqnr = __raw_readl(icoll_priv.stat);
__raw_writel(irqnr, icoll_priv.vector);
- handle_domain_irq(icoll_domain, irqnr, regs);
+ generic_handle_domain_irq(icoll_domain, irqnr);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index b31c4cff4d3a..63bac3f78863 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -37,10 +37,25 @@
static struct irq_domain *nvic_irq_domain;
+static void __nvic_handle_irq(irq_hw_number_t hwirq)
+{
+ generic_handle_domain_irq(nvic_irq_domain, hwirq);
+}
+
+/*
+ * TODO: restructure the ARMv7M entry logic so that this entry logic can live
+ * in arch code.
+ */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
- handle_domain_irq(nvic_irq_domain, hwirq, regs);
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ __nvic_handle_irq(hwirq);
+ set_irq_regs(old_regs);
+ irq_exit();
}
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index d360a6eddd6d..dc82162ba763 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -357,7 +357,7 @@ omap_intc_handle_irq(struct pt_regs *regs)
}
irqnr &= ACTIVEIRQ_MASK;
- handle_domain_irq(domain, irqnr, regs);
+ generic_handle_domain_irq(domain, irqnr);
}
static int __init intc_of_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 03d2366118dd..49b47e787644 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -116,7 +116,7 @@ static void or1k_pic_handle_irq(struct pt_regs *regs)
int irq = -1;
while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
- handle_domain_irq(root_domain, irq, regs);
+ generic_handle_domain_irq(root_domain, irq);
}
static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index b6868f7b805a..17c2c7a07f10 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -42,8 +42,8 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs)
gc->mask_cache;
while (stat) {
u32 hwirq = __fls(stat);
- handle_domain_irq(orion_irq_domain,
- gc->irq_base + hwirq, regs);
+ generic_handle_domain_irq(orion_irq_domain,
+ gc->irq_base + hwirq);
stat &= ~(1 << hwirq);
}
}
diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c
index 960168303b73..9f0144a73777 100644
--- a/drivers/irqchip/irq-rda-intc.c
+++ b/drivers/irqchip/irq-rda-intc.c
@@ -53,7 +53,7 @@ static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
while (stat) {
hwirq = __fls(stat);
- handle_domain_irq(rda_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(rda_irq_domain, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 8017f6d32d52..b65bd8878d4f 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -37,7 +37,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
break;
#endif
default:
- handle_domain_irq(intc_domain, cause, regs);
+ generic_handle_domain_irq(intc_domain, cause);
break;
}
}
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index dbccc7dafbf8..31c202a1ae62 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -140,8 +140,8 @@ sa1100_handle_irq(struct pt_regs *regs)
if (mask == 0)
break;
- handle_domain_irq(sa1100_normal_irqdomain,
- ffs(mask) - 1, regs);
+ generic_handle_domain_irq(sa1100_normal_irqdomain,
+ ffs(mask) - 1);
} while (1);
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 33c76710f845..b7cb2da71888 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -850,7 +850,6 @@ static int stm32_exti_probe(struct platform_device *pdev)
struct irq_domain *parent_domain, *domain;
struct stm32_exti_host_data *host_data;
const struct stm32_exti_drv_data *drv_data;
- struct resource *res;
host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
if (!host_data)
@@ -888,8 +887,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (!host_data->chips_data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host_data->base = devm_ioremap_resource(dev, res);
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 8a315d6a3399..dd506ebfdacb 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -195,7 +195,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
return;
do {
- handle_domain_irq(irq_ic_data->irq_domain, hwirq, regs);
+ generic_handle_domain_irq(irq_ic_data->irq_domain, hwirq);
hwirq = readl(irq_ic_data->irq_base +
SUN4I_IRQ_VECTOR_REG) >> 2;
} while (hwirq != 0);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 97f454ec376b..8eba08db33b2 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -650,7 +650,6 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
struct device_node *parent_node, *node;
struct ti_sci_inta_irq_domain *inta;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
node = dev_of_node(dev);
@@ -694,8 +693,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(inta->global_event);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- inta->base = devm_ioremap_resource(dev, res);
+ inta->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(inta->base))
return PTR_ERR(inta->base);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 34337a61b1ef..f032db23b30f 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -93,15 +93,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct ts4800_irq_data *data;
struct irq_chip *irq_chip;
- struct resource *res;
int parent_irq;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 75be350cf82f..f2757b6aecc8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -105,7 +105,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
while ((status = readl(f->base + IRQ_STATUS))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 1e1f2d115257..9e3d5561e04e 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -208,7 +208,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
- handle_domain_irq(vic->domain, irq, regs);
+ generic_handle_domain_irq(vic->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 5bce936af5d9..e17dd3a8c2d5 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -183,7 +183,7 @@ static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
continue;
}
- handle_domain_irq(intc[i].domain, irqnr, regs);
+ generic_handle_domain_irq(intc[i].domain, irqnr);
}
}
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
index f3ac392d5bc8..0dcbeb1a05a1 100644
--- a/drivers/irqchip/irq-wpcm450-aic.c
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -69,7 +69,7 @@ static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
/* Read IPER to signal that nIRQ can be de-asserted */
hwirq = readl(aic->regs + AIC_IPER) / 4;
- handle_domain_irq(aic->domain, hwirq, regs);
+ generic_handle_domain_irq(aic->domain, hwirq);
}
static void wpcm450_aic_eoi(struct irq_data *d)
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 84163f1ebfcf..7a72620fc478 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
- handle_domain_irq(zevio_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(zevio_irq_domain, irqnr);
}
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e501cb03f211..bd087cca1c1d 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
pci_set_master(hc->pdev);
if (!hc->irq) {
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return 1;
+ return -EINVAL;
}
hc->hw.pci_io =
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return 1;
+ return -ENOMEM;
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
if (!buffer) {
printk(KERN_WARNING
"HFC-PCI: Error allocating memory for FIFO!\n");
- return 1;
+ return -ENOMEM;
}
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
"HFC-PCI: Error in ioremap for PCI!\n");
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
- return 1;
+ return -ENOMEM;
}
printk(KERN_INFO
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 185e17055317..6fe9d700dfef 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -207,7 +207,7 @@ static ssize_t flash_fault_show(struct device *dev,
mask <<= 1;
}
- return sprintf(buf, "%s\n", buf);
+ return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(flash_fault);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 4e7b78a84149..072491d3e17b 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -157,7 +157,6 @@ EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
- unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
@@ -171,10 +170,13 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
/* Remove any existing trigger */
if (led_cdev->trigger) {
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock,
- flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+
+ /* ensure it's no longer visible on the led_cdevs list */
+ synchronize_rcu();
+
cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
@@ -186,9 +188,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
- write_lock_irqsave(&trig->leddev_list_lock, flags);
- list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
- write_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ spin_lock(&trig->leddev_list_lock);
+ list_add_tail_rcu(&led_cdev->trig_list, &trig->led_cdevs);
+ spin_unlock(&trig->leddev_list_lock);
led_cdev->trigger = trig;
if (trig->activate)
@@ -223,9 +225,10 @@ err_add_groups:
trig->deactivate(led_cdev);
err_activate:
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+ synchronize_rcu();
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_set_brightness(led_cdev, LED_OFF);
@@ -285,7 +288,7 @@ int led_trigger_register(struct led_trigger *trig)
struct led_classdev *led_cdev;
struct led_trigger *_trig;
- rwlock_init(&trig->leddev_list_lock);
+ spin_lock_init(&trig->leddev_list_lock);
INIT_LIST_HEAD(&trig->led_cdevs);
down_write(&triggers_list_lock);
@@ -378,15 +381,14 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -397,20 +399,19 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
invert);
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
void led_trigger_blink(struct led_trigger *trig,
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 1f1d57288085..dc6816d36d06 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
+ depends on !PREEMPT_RT
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fe63d5ee201b..d33913d523c1 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -848,7 +848,8 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
cmd->read = cmd->info.devaddr & 0x01;
switch(cmd->info.type) {
case SMU_I2C_TRANSFER_SIMPLE:
- memset(&cmd->info.sublen, 0, 4);
+ cmd->info.sublen = 0;
+ memset(cmd->info.subaddr, 0, sizeof(cmd->info.subaddr));
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c9fc06c7e685..d9cd3606040e 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -8,6 +8,18 @@ menuconfig MAILBOX
if MAILBOX
+config APPLE_MAILBOX
+ tristate "Apple Mailbox driver"
+ depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
+
config ARM_MHU
tristate "ARM MHU Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c2089f04887e..338cc05e5431 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -58,3 +58,5 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
new file mode 100644
index 000000000000..72942002a54a
--- /dev/null
+++ b/drivers/mailbox/apple-mailbox.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/apple-mailbox.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+struct apple_mbox {
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ struct mbox_chan chan;
+
+ struct device *dev;
+ struct mbox_controller controller;
+};
+
+static const struct of_device_id apple_mbox_of_match[];
+
+static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_full);
+}
+
+static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_send(apple_mbox))
+ return -EBUSY;
+
+ dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
+ apple_mbox->regs + apple_mbox->hw->a2i_send1);
+
+ return 0;
+}
+
+static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_empty);
+}
+
+static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_recv(apple_mbox))
+ return -ENOMSG;
+
+ msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
+ msg->msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
+
+ dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ return 0;
+}
+
+static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ struct apple_mbox_msg *msg = data;
+ int ret;
+
+ ret = apple_mbox_hw_send(apple_mbox, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+ enable_irq(apple_mbox->irq_send_empty);
+
+ return 0;
+}
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_chan_send_data will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ disable_irq_nosync(apple_mbox->irq_send_empty);
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+ struct apple_mbox_msg msg;
+
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0)
+ mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: If a new message arrives in between
+ * those two the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
+ apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_enable);
+ }
+
+ enable_irq(apple_mbox->irq_recv_not_empty);
+ return 0;
+}
+
+static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ disable_irq(apple_mbox->irq_recv_not_empty);
+}
+
+static const struct mbox_chan_ops apple_mbox_ops = {
+ .send_data = apple_mbox_chan_send_data,
+ .startup = apple_mbox_chan_startup,
+ .shutdown = apple_mbox_chan_shutdown,
+};
+
+static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ if (args->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *match;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ if (!match->data)
+ return -EINVAL;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->dev = dev;
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->hw = match->data;
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = &mbox->chan;
+ mbox->controller.ops = &apple_mbox_ops;
+ mbox->controller.txdone_irq = true;
+ mbox->controller.of_xlate = apple_mbox_of_xlate;
+ mbox->chan.con_priv = mbox;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
+ mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
+ irqname, mbox);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mbox->controller);
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
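For orientation, a minimal sketch of how a co-processor driver might use this controller through the generic mailbox client API follows. The example_coproc structure, example_rx() callback and example_start() helper are hypothetical illustration only; just mbox_request_channel(), mbox_send_message() and struct apple_mbox_msg (the msg0/msg1 pair used above) come from the framework and the driver itself.

#include <linux/apple-mailbox.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Hypothetical consumer state; not part of the patch above. */
struct example_coproc {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Invoked by the mailbox core for each message drained in apple_mbox_recv_irq(). */
static void example_rx(struct mbox_client *cl, void *data)
{
	struct apple_mbox_msg *msg = data;

	dev_dbg(cl->dev, "rx %016llx %08x\n", msg->msg0, msg->msg1);
}

static int example_start(struct device *dev, struct example_coproc *ec)
{
	struct apple_mbox_msg msg = { .msg0 = 0x1, .msg1 = 0 };	/* placeholder payload */
	int ret;

	ec->cl.dev = dev;
	ec->cl.rx_callback = example_rx;
	ec->cl.tx_block = true;	/* sleep until the send-empty IRQ signals txdone */
	ec->cl.tx_tout = 500;	/* ms */

	ec->chan = mbox_request_channel(&ec->cl, 0);	/* of_xlate above takes no cell args */
	if (IS_ERR(ec->chan))
		return PTR_ERR(ec->chan);

	ret = mbox_send_message(ec->chan, &msg);
	return ret < 0 ? ret : 0;
}

Blocking sends work in this sketch because the controller registers with txdone_irq = true and completes each transfer from apple_mbox_send_empty_irq() via mbox_chan_txdone().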
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 86b7ce3549c5..fbfd0202047c 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -137,7 +137,6 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = 0;
- struct resource *iomem;
struct bcm2835_mbox *mbox;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -153,8 +152,7 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
return ret;
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index 395ddc250828..e41bd2f5ea46 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -240,7 +240,6 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct hi3660_mbox *mbox;
struct mbox_chan *chan;
- struct resource *res;
unsigned long ch;
int err;
@@ -248,8 +247,7 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
if (!mbox)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 560cd09538b1..fca61f5312d9 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -264,7 +264,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct hi6220_mbox *mbox;
- struct resource *res;
int i, err;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -287,15 +286,13 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
if (mbox->irq < 0)
return mbox->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->ipc = devm_ioremap_resource(dev, res);
+ mbox->ipc = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->ipc)) {
dev_err(dev, "ioremap ipc failed\n");
return PTR_ERR(mbox->ipc);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mbox->base)) {
dev_err(dev, "ioremap buffer failed\n");
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 0ce75c6b36b6..ffe36a6bef9e 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/s4.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,6 +19,8 @@
#define IMX_MU_CHANS 16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
+/* TX0/RX0 */
+#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -47,6 +50,11 @@ struct imx_sc_rpc_msg_max {
u32 data[7];
};
+struct imx_s4_rpc_msg_max {
+ struct imx_s4_rpc_msg hdr;
+ u32 data[254];
+};
+
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
@@ -58,6 +66,7 @@ struct imx_mu_con_priv {
struct imx_mu_priv {
struct device *dev;
void __iomem *base;
+ void *msg;
spinlock_t xcr_lock; /* control register lock */
struct mbox_controller mbox;
@@ -75,7 +84,8 @@ struct imx_mu_priv {
enum imx_mu_type {
IMX_MU_V1,
- IMX_MU_V2,
+ IMX_MU_V2 = BIT(1),
+ IMX_MU_V2_S4 = BIT(15),
};
struct imx_mu_dcfg {
@@ -89,18 +99,18 @@ struct imx_mu_dcfg {
u32 xCR[4]; /* Control Registers */
};
-#define IMX_MU_xSR_GIPn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
-#define IMX_MU_xSR_RFn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
-#define IMX_MU_xSR_TEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Enable */
-#define IMX_MU_xCR_GIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
-#define IMX_MU_xCR_RIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
-#define IMX_MU_xCR_TIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
-#define IMX_MU_xCR_GIRn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -167,14 +177,22 @@ static int imx_mu_generic_rx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_tx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp,
- void *data)
+static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
- struct imx_sc_rpc_msg_max *msg = data;
u32 *arg = data;
int i, ret;
u32 xsr;
+ u32 size, max_size, num_tr;
+
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ num_tr = 8;
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ num_tr = 4;
+ }
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -183,27 +201,27 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
* sizeof yields bytes.
*/
- if (msg->hdr.size > sizeof(*msg) / 4) {
+ if (size > max_size / 4) {
/*
* The real message size can be different to
- * struct imx_sc_rpc_msg_max size
+ * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
*/
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 0; i < 4 && i < msg->hdr.size; i++)
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
- for (; i < msg->hdr.size; i++) {
+ for (i = 0; i < num_tr && i < size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
+ for (; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
xsr,
- xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % 4),
+ xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
0, 100);
if (ret) {
dev_err(priv->dev, "Send data index: %d timeout\n", i);
return ret;
}
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
@@ -216,23 +234,32 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_rx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp)
+static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
- struct imx_sc_rpc_msg_max msg;
- u32 *data = (u32 *)&msg;
+ u32 *data;
int i, ret;
u32 xsr;
+ u32 size, max_size;
+
+ data = (u32 *)priv->msg;
imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR);
- if (msg.hdr.size > sizeof(msg) / 4) {
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ }
+
+ if (size > max_size / 4) {
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 1; i < msg.hdr.size; i++) {
+ for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0, 100);
if (ret) {
@@ -243,7 +270,7 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
- mbox_chan_received_data(cp->chan, (void *)&msg);
+ mbox_chan_received_data(cp->chan, (void *)priv->msg);
return 0;
}
@@ -394,8 +421,8 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
-static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
{
u32 type, idx, chan;
@@ -478,11 +505,12 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
-static void imx_mu_init_scu(struct imx_mu_priv *priv)
+static void imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
+ int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
- for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ for (i = 0; i < num_chans; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
cp->idx = i < 2 ? 0 : i - 2;
@@ -493,8 +521,8 @@ static void imx_mu_init_scu(struct imx_mu_priv *priv)
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
}
- priv->mbox.num_chans = IMX_MU_SCU_CHANS;
- priv->mbox.of_xlate = imx_mu_scu_xlate;
+ priv->mbox.num_chans = num_chans;
+ priv->mbox.of_xlate = imx_mu_specific_xlate;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -508,6 +536,7 @@ static int imx_mu_probe(struct platform_device *pdev)
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
int ret;
+ u32 size;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -528,6 +557,15 @@ static int imx_mu_probe(struct platform_device *pdev)
return -EINVAL;
priv->dcfg = dcfg;
+ if (priv->dcfg->type & IMX_MU_V2_S4)
+ size = sizeof(struct imx_s4_rpc_msg_max);
+ else
+ size = sizeof(struct imx_sc_rpc_msg_max);
+
+ priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!priv->msg)
+ return -ENOMEM;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
@@ -623,10 +661,21 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xCR = {0x110, 0x114, 0x120, 0x128},
};
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .type = IMX_MU_V2 | IMX_MU_V2_S4,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x110, 0x114, 0x120, 0x128},
+};
+
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
- .tx = imx_mu_scu_tx,
- .rx = imx_mu_scu_rx,
- .init = imx_mu_init_scu,
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
@@ -637,6 +686,7 @@ static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 75282666fb06..afb320e9d69c 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -285,7 +285,6 @@ static const struct mbox_chan_ops altera_mbox_ops = {
static int altera_mbox_probe(struct platform_device *pdev)
{
struct altera_mbox *mbox;
- struct resource *regs;
struct mbox_chan *chans;
int ret;
@@ -299,9 +298,7 @@ static int altera_mbox_probe(struct platform_device *pdev)
if (!chans)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index ab3a6ab762d3..823061dd8c8e 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -408,7 +408,6 @@ static int sti_mbox_probe(struct platform_device *pdev)
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
struct mbox_chan *chans;
- struct resource *res;
int irq;
int ret;
@@ -425,8 +424,7 @@ static int sti_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mdev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->base = devm_ioremap_resource(&pdev->dev, res);
+ mdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->base))
return PTR_ERR(mdev->base);
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index 5b3a2dcd5955..946ea773ec33 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -170,7 +170,6 @@ static const struct mbox_chan_ops slimpro_mbox_ops = {
static int slimpro_mbox_probe(struct platform_device *pdev)
{
struct slimpro_mbox *ctx;
- struct resource *regs;
void __iomem *mb_base;
int rc;
int i;
@@ -181,8 +180,7 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ mb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mb_base))
return PTR_ERR(mb_base);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 64175a893312..a8845b162dbf 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -195,7 +195,6 @@ static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
- WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
data.sta = sta;
data.data = cb->data;
data.pkt = task->pkt;
@@ -525,21 +524,20 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
struct device_node *phandle = dev->of_node;
struct device_node *node;
int alias_id = 0;
- char clk_name[4] = "gce";
+ static const char * const clk_name = "gce";
+ static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cmdq->base = devm_ioremap_resource(dev, res);
+ cmdq->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmdq->base))
return PTR_ERR(cmdq->base);
@@ -570,12 +568,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (cmdq->gce_num > 1) {
for_each_child_of_node(phandle->parent, node) {
- char clk_id[8];
-
alias_id = of_alias_get_id(node, clk_name);
- if (alias_id < cmdq->gce_num) {
- snprintf(clk_id, sizeof(clk_id), "%s%d", clk_name, alias_id);
- cmdq->clocks[alias_id].id = clk_id;
+ if (alias_id >= 0 && alias_id < cmdq->gce_num) {
+ cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
dev_err(dev, "failed to get gce clk: %d\n", alias_id);
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 7295e3835e30..58f3d569f095 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -699,7 +699,6 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
static int omap_mbox_probe(struct platform_device *pdev)
{
- struct resource *mem;
int ret;
struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
@@ -776,8 +775,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0296558f9e22..887a3704c12e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -62,31 +63,48 @@
#define MBOX_IRQ_NAME "pcc-mbox"
-static struct mbox_chan *pcc_mbox_channels;
-
-/* Array of cached virtual address for doorbell registers */
-static void __iomem **pcc_doorbell_vaddr;
-/* Array of cached virtual address for doorbell ack registers */
-static void __iomem **pcc_doorbell_ack_vaddr;
-/* Array of doorbell interrupts */
-static int *pcc_doorbell_irq;
+/**
+ * struct pcc_chan_reg - PCC register bundle
+ *
+ * @vaddr: cached virtual address for this register
+ * @gas: pointer to the generic address structure for this register
+ * @preserve_mask: bitmask to preserve when writing to this register
+ * @set_mask: bitmask to set when writing to this register
+ * @status_mask: bitmask to determine and/or update the status for this register
+ */
+struct pcc_chan_reg {
+ void __iomem *vaddr;
+ struct acpi_generic_address *gas;
+ u64 preserve_mask;
+ u64 set_mask;
+ u64 status_mask;
+};
-static struct mbox_controller pcc_mbox_ctrl = {};
/**
- * get_pcc_channel - Given a PCC subspace idx, get
- * the respective mbox_channel.
- * @id: PCC subspace index.
+ * struct pcc_chan_info - PCC channel specific information
*
- * Return: ERR_PTR(errno) if error, else pointer
- * to mbox channel.
+ * @chan: PCC channel information with Shared Memory Region info
+ * @db: PCC register bundle for the doorbell register
+ * @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
+ * register
+ * @cmd_complete: PCC register bundle for the command complete check register
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
*/
-static struct mbox_chan *get_pcc_channel(int id)
-{
- if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
- return ERR_PTR(-ENOENT);
+struct pcc_chan_info {
+ struct pcc_mbox_chan chan;
+ struct pcc_chan_reg db;
+ struct pcc_chan_reg plat_irq_ack;
+ struct pcc_chan_reg cmd_complete;
+ struct pcc_chan_reg cmd_update;
+ struct pcc_chan_reg error;
+ int plat_irq;
+};
- return &pcc_mbox_channels[id];
-}
+#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+static struct pcc_chan_info *chan_info;
+static int pcc_chan_count;
/*
* PCC can be used with perf critical drivers such as CPPC
@@ -96,10 +114,8 @@ static struct mbox_chan *get_pcc_channel(int id)
* The below read_register and write_registers are used to read and
* write from perf critical registers such as PCC doorbell register
*/
-static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
*val = readb(vaddr);
@@ -113,19 +129,11 @@ static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
case 64:
*val = readq(vaddr);
break;
- default:
- pr_debug("Error: Cannot read register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
}
-static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
writeb(val, vaddr);
@@ -139,13 +147,54 @@ static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
case 64:
writeq(val, vaddr);
break;
- default:
- pr_debug("Error: Cannot write register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
+}
+
+static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
+{
+ int ret = 0;
+
+ if (!reg->gas) {
+ *val = 0;
+ return 0;
+ }
+
+ if (reg->vaddr)
+ read_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_read(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
+{
+ int ret = 0;
+
+ if (!reg->gas)
+ return 0;
+
+ if (reg->vaddr)
+ write_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_write(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
+{
+ int ret = 0;
+ u64 val;
+
+ ret = pcc_chan_reg_read(reg, &val);
+ if (ret)
+ return ret;
+
+ val &= reg->preserve_mask;
+ val |= reg->set_mask;
+
+ return pcc_chan_reg_write(reg, val);
}
/**
@@ -174,42 +223,42 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+ * @p: data/cookie passed from the caller to identify the channel
+ *
+ * Returns: IRQ_HANDLED if interrupt is handled or IRQ_NONE if not
*/
static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
- struct acpi_generic_address *doorbell_ack;
- struct acpi_pcct_hw_reduced *pcct_ss;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 doorbell_ack_preserve;
- u64 doorbell_ack_write;
- u64 doorbell_ack_val;
+ u64 val;
int ret;
- pcct_ss = chan->con_priv;
+ pchan = chan->con_priv;
- mbox_chan_received_data(chan, NULL);
+ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (ret)
+ return IRQ_NONE;
- if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
- u32 id = chan - pcc_mbox_channels;
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
- doorbell_ack = &pcct2_ss->platform_ack_register;
- doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
- doorbell_ack_write = pcct2_ss->ack_write_mask;
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return IRQ_NONE;
+ val &= pchan->error.status_mask;
+ if (val) {
+ val &= ~pchan->error.status_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return IRQ_NONE;
+ }
- ret = read_register(pcc_doorbell_ack_vaddr[id],
- &doorbell_ack_val,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
- ret = write_register(pcc_doorbell_ack_vaddr[id],
- (doorbell_ack_val & doorbell_ack_preserve)
- | doorbell_ack_write,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
- }
+ mbox_chan_received_data(chan, NULL);
return IRQ_HANDLED;
}
@@ -224,29 +273,26 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* ACPI package. This is used to lookup the array of PCC
* subspaces as parsed by the PCC Mailbox controller.
*
- * Return: Pointer to the Mailbox Channel if successful or
- * ERR_PTR.
+ * Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
*/
-struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
- struct device *dev = pcc_mbox_ctrl.dev;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan;
+ struct device *dev;
unsigned long flags;
- /*
- * Each PCC Subspace is a Mailbox Channel.
- * The PCC Clients get their PCC Subspace ID
- * from their own tables and pass it here.
- * This returns a pointer to the PCC subspace
- * for the Client to operate on.
- */
- chan = get_pcc_channel(subspace_id);
+ if (subspace_id < 0 || subspace_id >= pcc_chan_count)
+ return ERR_PTR(-ENOENT);
+ pchan = chan_info + subspace_id;
+ chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
- dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+ pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
+ dev = chan->mbox->dev;
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
@@ -260,44 +306,40 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
spin_unlock_irqrestore(&chan->lock, flags);
- if (pcc_doorbell_irq[subspace_id] > 0) {
+ if (pchan->plat_irq > 0) {
int rc;
- rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id],
- pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan);
+ rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
+ MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
- pcc_doorbell_irq[subspace_id]);
- pcc_mbox_free_channel(chan);
- chan = ERR_PTR(rc);
+ pchan->plat_irq);
+ pcc_mbox_free_channel(&pchan->chan);
+ return ERR_PTR(rc);
}
}
- return chan;
+ return &pchan->chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
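[Editorial note: a minimal sketch of how a client might consume the reworked API. EXAMPLE_SUBSPACE_ID and the mbox_client setup are illustrative assumptions, not part of this patch; the point of the change is that the Shared Memory Region parameters now travel with the returned struct pcc_mbox_chan, so clients no longer re-parse the PCCT themselves.]

static struct mbox_client example_cl = {
	.knows_txdone = true,	/* completion is signalled via PCC, not the framework */
};

static int example_pcc_client_init(void)
{
	struct pcc_mbox_chan *pcc_chan;
	void __iomem *shmem;

	pcc_chan = pcc_mbox_request_channel(&example_cl, EXAMPLE_SUBSPACE_ID);
	if (IS_ERR(pcc_chan))
		return PTR_ERR(pcc_chan);

	/* Shared Memory Region info is carried in the returned channel */
	shmem = acpi_os_ioremap(pcc_chan->shmem_base_addr, pcc_chan->shmem_size);
	if (!shmem) {
		pcc_mbox_free_channel(pcc_chan);
		return -ENOMEM;
	}

	/* ... write the command into shmem, then ring the doorbell via
	 * mbox_send_message(pcc_chan->mchan, NULL) ... */
	return 0;
}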
/**
* pcc_mbox_free_channel - Clients call this to free their Channel.
*
- * @chan: Pointer to the mailbox channel as returned by
- * pcc_mbox_request_channel()
+ * @pchan: Pointer to the PCC mailbox channel as returned by
+ * pcc_mbox_request_channel()
*/
-void pcc_mbox_free_channel(struct mbox_chan *chan)
+void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
- u32 id = chan - pcc_mbox_channels;
+ struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
+ struct mbox_chan *chan = pchan->mchan;
unsigned long flags;
if (!chan || !chan->cl)
return;
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n");
- return;
- }
-
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+ if (pchan_info->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
@@ -323,40 +365,14 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
- struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_generic_address *doorbell;
- u64 doorbell_preserve;
- u64 doorbell_val;
- u64 doorbell_write;
- u32 id = chan - pcc_mbox_channels;
- int ret = 0;
-
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
- return -ENOENT;
- }
+ int ret;
+ struct pcc_chan_info *pchan = chan->con_priv;
- doorbell = &pcct_ss->doorbell_register;
- doorbell_preserve = pcct_ss->preserve_mask;
- doorbell_write = pcct_ss->write_mask;
+ ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ret)
+ return ret;
- /* Sync notification from OS to Platform. */
- if (pcc_doorbell_vaddr[id]) {
- ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
- doorbell->bit_width);
- if (ret)
- return ret;
- ret = write_register(pcc_doorbell_vaddr[id],
- (doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell->bit_width);
- } else {
- ret = acpi_read(&doorbell_val, doorbell);
- if (ret)
- return ret;
- ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell);
- }
- return ret;
+ return pcc_chan_reg_read_modify_write(&pchan->db);
}
static const struct mbox_chan_ops pcc_chan_ops = {
@@ -364,7 +380,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
};
/**
- * parse_pcc_subspaces -- Count PCC subspaces defined
+ * parse_pcc_subspace - Count PCC subspaces defined
* @header: Pointer to the ACPI subtable header under the PCCT.
* @end: End of subtable entry.
*
@@ -384,41 +400,172 @@ static int parse_pcc_subspace(union acpi_subtable_headers *header,
return -EINVAL;
}
+static int
+pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas,
+ u64 preserve_mask, u64 set_mask, u64 status_mask, char *name)
+{
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (!(gas->bit_width >= 8 && gas->bit_width <= 64 &&
+ is_power_of_2(gas->bit_width))) {
+ pr_err("Error: Cannot access register of %u bit width",
+ gas->bit_width);
+ return -EFAULT;
+ }
+
+ reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
+ if (!reg->vaddr) {
+ pr_err("Failed to ioremap PCC %s register\n", name);
+ return -ENOMEM;
+ }
+ }
+ reg->gas = gas;
+ reg->preserve_mask = preserve_mask;
+ reg->set_mask = set_mask;
+ reg->status_mask = status_mask;
+ return 0;
+}
+
/**
* pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
- * There should be one entry per PCC client.
- * @id: PCC subspace index.
- * @pcct_ss: Pointer to the ACPI subtable header under the PCCT.
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
*
* Return: 0 for Success, else errno.
*
- * This gets called for each entry in the PCC table.
+ * There should be one entry per PCC channel. This gets called for each
+ * entry in the PCC table. This uses the PCCT Type 1 structure layout for
+ * all applicable types (Type 1-4) to fetch the IRQ.
*/
-static int pcc_parse_subspace_irq(int id,
- struct acpi_pcct_hw_reduced *pcct_ss)
+static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
{
- pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
- (u32)pcct_ss->flags);
- if (pcc_doorbell_irq[id] <= 0) {
+ int ret = 0;
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+ pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return 0;
+
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
+ (u32)pcct_ss->flags);
+ if (pchan->plat_irq <= 0) {
pr_err("PCC GSI %d not registered\n",
pcct_ss->platform_interrupt);
return -EINVAL;
}
- if (pcct_ss->header.type
- == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
- pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
- pcct2_ss->platform_ack_register.address,
- pcct2_ss->platform_ack_register.bit_width / 8);
- if (!pcc_doorbell_ack_vaddr[id]) {
- pr_err("Failed to ioremap PCC ACK register\n");
- return -ENOMEM;
- }
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct2_ss->platform_ack_register,
+ pcct2_ss->ack_preserve_mask,
+ pcct2_ss->ack_write_mask, 0,
+ "PLAT IRQ ACK");
+
+ } else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
+ pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
+ struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
+
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct_ext->platform_ack_register,
+ pcct_ext->ack_preserve_mask,
+ pcct_ext->ack_set_mask, 0,
+ "PLAT IRQ ACK");
}
- return 0;
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_db_reg - Parse the PCC doorbell register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ int ret = 0;
+
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss;
+
+ pcct_ss = (struct acpi_pcct_subspace *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ss->doorbell_register,
+ pcct_ss->preserve_mask,
+ pcct_ss->write_mask, 0, "Doorbell");
+
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext;
+
+ pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ext->doorbell_register,
+ pcct_ext->preserve_mask,
+ pcct_ext->write_mask, 0, "Doorbell");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_complete,
+ &pcct_ext->cmd_complete_register,
+ 0, 0, pcct_ext->cmd_complete_mask,
+ "Command Complete Check");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_update,
+ &pcct_ext->cmd_update_register,
+ pcct_ext->cmd_update_preserve_mask,
+ pcct_ext->cmd_update_set_mask, 0,
+ "Command Complete Update");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->error,
+ &pcct_ext->error_status_register,
+ 0, 0, pcct_ext->error_status_mask,
+ "Error Status");
+ }
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ */
+static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss =
+ (struct acpi_pcct_subspace *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ss->base_address;
+ pchan->chan.shmem_size = pcct_ss->length;
+ pchan->chan.latency = pcct_ss->latency;
+ pchan->chan.max_access_rate = pcct_ss->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time;
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext =
+ (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ext->base_address;
+ pchan->chan.shmem_size = pcct_ext->length;
+ pchan->chan.latency = pcct_ext->latency;
+ pchan->chan.max_access_rate = pcct_ext->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time;
+ }
}
/**
@@ -428,16 +575,12 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
+ int count, i, rc = 0;
+ acpi_status status;
struct acpi_table_header *pcct_tbl;
- struct acpi_subtable_header *pcct_entry;
- struct acpi_table_pcct *acpi_pcct_tbl;
struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
- int count, i, rc;
- acpi_status status = AE_OK;
- /* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
-
if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
@@ -459,33 +602,60 @@ static int __init acpi_pcc_probe(void)
pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
rc = -EINVAL;
- goto err_put_pcct;
+ } else {
+ pcc_chan_count = count;
}
- pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
- GFP_KERNEL);
- if (!pcc_mbox_channels) {
- pr_err("Could not allocate space for PCC mbox channels\n");
- rc = -ENOMEM;
- goto err_put_pcct;
- }
+ acpi_put_table(pcct_tbl);
+
+ return rc;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_controller *pcc_mbox_ctrl;
+ struct mbox_chan *pcc_mbox_channels;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ struct acpi_table_pcct *acpi_pcct_tbl;
+ acpi_status status = AE_OK;
+ int i, rc, count = pcc_chan_count;
+
+ /* Search for PCCT */
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
- pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_vaddr) {
+ pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels),
+ GFP_KERNEL);
+ if (!pcc_mbox_channels) {
rc = -ENOMEM;
- goto err_free_mbox;
+ goto err;
}
- pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_ack_vaddr) {
+ chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info) {
rc = -ENOMEM;
- goto err_free_db_vaddr;
+ goto err;
}
- pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
- if (!pcc_doorbell_irq) {
+ pcc_mbox_ctrl = devm_kmalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL);
+ if (!pcc_mbox_ctrl) {
rc = -ENOMEM;
- goto err_free_db_ack_vaddr;
+ goto err;
}
/* Point to the first PCC subspace entry */
@@ -494,85 +664,55 @@ static int __init acpi_pcc_probe(void)
acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
- pcc_mbox_ctrl.txdone_irq = true;
+ pcc_mbox_ctrl->txdone_irq = true;
for (i = 0; i < count; i++) {
- struct acpi_generic_address *db_reg;
- struct acpi_pcct_subspace *pcct_ss;
- pcc_mbox_channels[i].con_priv = pcct_entry;
+ struct pcc_chan_info *pchan = chan_info + i;
- if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
- pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced *pcct_hrss;
+ pcc_mbox_channels[i].con_priv = pchan;
+ pchan->chan.mchan = &pcc_mbox_channels[i];
- pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;
+ if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
+ !pcc_mbox_ctrl->txdone_irq) {
+ pr_err("Plaform Interrupt flag must be set to 1");
+ rc = -EINVAL;
+ goto err;
+ }
- if (pcc_mbox_ctrl.txdone_irq) {
- rc = pcc_parse_subspace_irq(i, pcct_hrss);
- if (rc < 0)
- goto err;
- }
+ if (pcc_mbox_ctrl->txdone_irq) {
+ rc = pcc_parse_subspace_irq(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
}
- pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;
+ rc = pcc_parse_subspace_db_reg(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
+
+ pcc_parse_subspace_shmem(pchan, pcct_entry);
- /* If doorbell is in system memory cache the virt address */
- db_reg = &pcct_ss->doorbell_register;
- if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
- db_reg->bit_width/8);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
- pcc_mbox_ctrl.num_chans = count;
+ pcc_mbox_ctrl->num_chans = count;
- pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans);
- return 0;
+ pcc_mbox_ctrl->chans = pcc_mbox_channels;
+ pcc_mbox_ctrl->ops = &pcc_chan_ops;
+ pcc_mbox_ctrl->dev = dev;
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ rc = mbox_controller_register(pcc_mbox_ctrl);
+ if (rc)
+ pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
+ else
+ return 0;
err:
- kfree(pcc_doorbell_irq);
-err_free_db_ack_vaddr:
- kfree(pcc_doorbell_ack_vaddr);
-err_free_db_vaddr:
- kfree(pcc_doorbell_vaddr);
-err_free_mbox:
- kfree(pcc_mbox_channels);
-err_put_pcct:
acpi_put_table(pcct_tbl);
return rc;
}
-/**
- * pcc_mbox_probe - Called when we find a match for the
- * PCCT platform device. This is purely used to represent
- * the PCCT as a virtual device for registering with the
- * generic Mailbox framework.
- *
- * @pdev: Pointer to platform device returned when a match
- * is found.
- *
- * Return: 0 for Success, else errno.
- */
-static int pcc_mbox_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- pcc_mbox_ctrl.chans = pcc_mbox_channels;
- pcc_mbox_ctrl.ops = &pcc_chan_ops;
- pcc_mbox_ctrl.dev = &pdev->dev;
-
- pr_info("Registering PCC driver as Mailbox controller\n");
- ret = mbox_controller_register(&pcc_mbox_ctrl);
-
- if (ret) {
- pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
- ret = -ENODEV;
- }
-
- return ret;
-}
-
static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index b6e34952246b..a5922ac0b0bf 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -117,7 +117,6 @@ static int platform_mhu_probe(struct platform_device *pdev)
int i, err;
struct platform_mhu *mhu;
struct device *dev = &pdev->dev;
- struct resource *res;
int platform_mhu_reg[MHU_CHANS] = {
MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
};
@@ -127,8 +126,7 @@ static int platform_mhu_probe(struct platform_device *pdev)
if (!mhu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mhu->base = devm_ioremap_resource(dev, res);
+ mhu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mhu->base)) {
dev_err(dev, "ioremap failed\n");
return PTR_ERR(mhu->base);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 82ccfaf14b24..9325d2abc745 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -33,10 +33,6 @@ static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
.offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
};
-static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
.offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
};
@@ -49,18 +45,6 @@ static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
.offset = 16, .clk_name = NULL
};
-static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sdm660_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sm6125_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
@@ -95,7 +79,6 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
struct qcom_apcs_ipc *apcs;
const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
unsigned long i;
int ret;
@@ -104,8 +87,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (!apcs)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -160,21 +142,22 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &sm6125_apcs_data },
+ { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
};
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index b84e0587937c..15d538fe2113 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -205,7 +205,6 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
- struct resource *res;
unsigned long i;
int ret;
u32 ip_ver;
@@ -235,8 +234,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
}
/* regs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipcc->reg_base = devm_ioremap_resource(dev, res);
+ ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipcc->reg_base))
return PTR_ERR(ipcc->reg_base);
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
index ccecf2e5941d..7f8d931042d3 100644
--- a/drivers/mailbox/sun6i-msgbox.c
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -197,7 +197,6 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mbox_chan *chans;
struct reset_control *reset;
- struct resource *res;
struct sun6i_msgbox *mbox;
int i, ret;
@@ -246,13 +245,7 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
goto err_disable_unprepare;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto err_disable_unprepare;
- }
-
- mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5fc989a6d452..9ed9c955add7 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
-#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -190,6 +189,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -395,8 +395,6 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
-
- char backing_dev_name[BDEVNAME_SIZE];
};
enum alloc_reserve {
@@ -470,8 +468,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
-
- char cache_dev_name[BDEVNAME_SIZE];
};
struct gc_stat {
diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
new file mode 100644
index 000000000000..97413586195b
--- /dev/null
+++ b/drivers/md/bcache/bcache_ondisk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <linux/types.h>
+
+#define BITMASK(name, type, field, offset, size) \
+static inline __u64 name(const type *k) \
+{ return (k->field >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << size) << offset); \
+ k->field |= (v & ~(~0ULL << size)) << offset; \
+}
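[Editorial illustration, not part of the new file: the BITMASK() generator is invoked further down, e.g. BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1), and expands to roughly the following accessor pair.]

static inline __u64 CACHE_SYNC(const struct cache_sb *k)
{ return (k->flags >> 0) & ~(~0ULL << 1); }

static inline void SET_CACHE_SYNC(struct cache_sb *k, __u64 v)
{
	k->flags &= ~(~(~0ULL << 1) << 0);
	k->flags |= (v & ~(~0ULL << 1)) << 0;
}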
+
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+ __u64 high;
+ __u64 low;
+ __u64 ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
+{ \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+}
+
+#define KEY_SIZE_BITS 16
+#define KEY_MAX_U64S 8
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(__PAD0, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(__PAD1, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+ k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
+ .low = (offset) \
+})
+
+#define ZERO_KEY KEY(0, 0, 0)
+
+#define MAX_KEY_INODE (~(~0 << 20))
+#define MAX_KEY_OFFSET (~0ULL >> 1)
+#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
+
+#define PTR_DEV_BITS 12
+
+PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
+
+#define MAKE_PTR(gen, offset, dev) \
+ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+ return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(__u64);
+}
+
+#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + nr_keys);
+}
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
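[Editorial sketch of how these helpers compose, with hypothetical values not taken from this patch: build a one-pointer key for a 16-sector extent of inode 5 ending at sector 1024, backed by cache device 0, bucket offset 2048, generation 3.]

static void example_build_key(void)
{
	BKEY_PADDED(key) k;			/* room for the key plus pointers */

	k.key = KEY(5, 1024, 16);		/* inode, offset (end), size in sectors */
	SET_KEY_PTRS(&k.key, 1);
	k.key.ptr[0] = MAKE_PTR(3, 2048, 0);	/* gen, bucket offset, cache dev */

	/* KEY_START(&k.key) == 1008, PTR_OFFSET(&k.key, 0) == 2048 */
}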
+
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
+#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
+#define BCACHE_SB_MAX_VERSION 6
+
+#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+
+ __le64 feature_compat;
+ __le64 feature_incompat;
+ __le64 feature_ro_compat;
+
+ __le64 pad[5];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+ __le16 obso_bucket_size_hi; /* obsoleted */
+};
+
+/*
+ * This is for in-memory bcache super block.
+ * NOTE: cache_sb does NOT map exactly to cache_sb_disk; the member
+ * sizes, ordering and even the whole struct size may differ
+ * from cache_sb_disk.
+ */
+struct cache_sb {
+ __u64 offset; /* sector where this sb was written */
+ __u64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __u64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __u64 flags;
+ __u64 seq;
+
+ __u64 feature_compat;
+ __u64 feature_incompat;
+ __u64 feature_ro_compat;
+
+ union {
+ struct {
+ /* Cache devices */
+ __u64 nbuckets; /* device size */
+
+ __u16 block_size; /* sectors */
+ __u16 nr_in_set;
+ __u16 nr_this_dev;
+ __u32 bucket_size; /* sectors */
+ };
+ struct {
+ /* Backing devices */
+ __u64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __u32 last_mount; /* time overflow in y2106 */
+
+ __u16 first_bucket;
+ union {
+ __u16 njournal_buckets;
+ __u16 keys;
+ };
+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
+}
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ BSET_MAGIC;
+}
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
+#define BCACHE_JSET_VERSION 1
+
+struct jset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ __u64 last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ __u16 btree_level;
+ __u16 pad[3];
+
+ __u64 prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* Bucket prios/gens */
+
+struct prio_set {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 pad;
+
+ __u64 next_bucket;
+
+ struct bucket_disk {
+ __u16 prio;
+ __u8 gen;
+ } __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+ union {
+ struct {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg; /* time overflow in y2106 */
+ __u32 last_reg;
+ __u32 invalidated;
+
+ __u32 flags;
+ /* Size of flash only volumes */
+ __u64 sectors;
+ };
+
+ __u8 pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_CSUM 1
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
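[Editorial sketch, assuming (as bkey_idx() implies) that bset.keys counts 64-bit words rather than bkeys: walking every key of one sorted set with the helpers above looks like this.]

static inline __u64 example_bset_sectors(const struct bset *i)
{
	const struct bkey *k;
	__u64 sectors = 0;

	for (k = i->start; k < bkey_idx(i->start, i->keys); k = bkey_next(k))
		sectors += KEY_SIZE(k);	/* sectors covered by this extent */

	return sectors;
}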
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+ __u32 pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index a50dcfda656f..d795c84246b0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -2,10 +2,10 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 0595559de174..93b67b8d31c3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -141,7 +141,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
uint64_t crc = b->key.ptr[0];
void *data = (void *) i + 8, *end = bset_bkey_last(i);
- crc = bch_crc64_update(crc, data, end - data);
+ crc = crc64_be(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 116edda845c3..6230dfdd9286 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -127,21 +127,20 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
- void *p1 = kmap_atomic(bv.bv_page);
+ void *p1 = bvec_kmap_local(&bv);
void *p2;
cbv = bio_iter_iovec(check, citer);
- p2 = page_address(cbv.bv_page);
+ p2 = bvec_kmap_local(&cbv);
- cache_set_err_on(memcmp(p1 + bv.bv_offset,
- p2 + bv.bv_offset,
- bv.bv_len),
+ cache_set_err_on(memcmp(p1, p2, bv.bv_len),
dc->disk.c,
- "verify failed at dev %s sector %llu",
- dc->backing_dev_name,
+ "verify failed at dev %pg sector %llu",
+ dc->bdev,
(uint64_t) bio->bi_iter.bi_sector);
- kunmap_atomic(p1);
+ kunmap_local(p2);
+ kunmap_local(p1);
bio_advance_iter(check, &citer, bv.bv_len);
}
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6d2b7b84a7b7..634922c5601d 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -6,7 +6,7 @@
* Copyright 2020 Coly Li <colyli@suse.de>
*
*/
-#include <linux/bcache.h>
+#include "bcache_ondisk.h"
#include "bcache.h"
#include "features.h"
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index d1c8fd3977fc..09161b89c63e 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -2,10 +2,11 @@
#ifndef _BCACHE_FEATURES_H
#define _BCACHE_FEATURES_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
+
#define BCH_FEATURE_COMPAT 0
#define BCH_FEATURE_RO_COMPAT 1
#define BCH_FEATURE_INCOMPAT 2
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e4388fe3ab7e..9c6f9ec55b72 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -65,15 +65,15 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
- pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
- dc->backing_dev_name);
+ pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
+ dc->bdev);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
- pr_err("%s: IO error on backing device, unrecoverable\n",
- dc->backing_dev_name);
+ pr_err("%pg: IO error on backing device, unrecoverable\n",
+ dc->bdev);
else
bch_cached_dev_error(dc);
}
@@ -123,13 +123,13 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s%s\n",
- ca->cache_dev_name, m,
+ pr_err("%pg: IO error on %s%s\n",
+ ca->bdev, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
- "%s: too many IO errors %s\n",
- ca->cache_dev_name, m);
+ "%pg: too many IO errors %s\n",
+ ca->bdev, m);
}
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 6d1de889baeb..d15aae6c51c1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -46,7 +46,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
- csum = bch_crc64_update(csum, d, bv.bv_len);
+ csum = crc64_be(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@@ -651,8 +651,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i\n",
- dc->backing_dev_name, bio->bi_status);
+ pr_err("Can't flush %pg: returned bi_status %i\n",
+ dc->bdev, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;
@@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_submit_bio(struct bio *bio)
+void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct block_device *orig_bdev = bio->bi_bdev;
@@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (likely(d->c)) {
@@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
} else
/* I/O request sent to backing device */
detached_dev_do_request(d, bio, orig_bdev, start_time);
-
- return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_submit_bio(struct bio *bio)
+void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
@@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
- return BLK_QC_T_NONE;
+ return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
}
continue_at(cl, search_free, NULL);
- return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 82b38366a95d..38ab4856eaab 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_submit_bio(struct bio *bio);
+void cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_submit_bio(struct bio *bio);
+void flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f2874c77ff79..4a9a65dff95e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1002,7 +1002,7 @@ static void calc_cached_dev_sectors(struct cache_set *c)
struct cached_dev *dc;
list_for_each_entry(dc, &c->cached_devs, list)
- sectors += bdev_sectors(dc->bdev);
+ sectors += bdev_nr_sectors(dc->bdev);
c->cached_dev_sectors = sectors;
}
@@ -1026,8 +1026,8 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
- pr_err("%s: device offline for %d seconds\n",
- dc->backing_dev_name,
+ pr_err("%pg: device offline for %d seconds\n",
+ dc->bdev,
BACKING_DEV_OFFLINE_TIMEOUT);
pr_err("%s: disable I/O request due to backing device offline\n",
dc->disk.name);
@@ -1058,15 +1058,13 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
- pr_err("I/O disabled on cached dev %s\n",
- dc->backing_dev_name);
+ pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
ret = -EIO;
goto out;
}
if (atomic_xchg(&dc->running, 1)) {
- pr_info("cached dev %s is running already\n",
- dc->backing_dev_name);
+ pr_info("cached dev %pg is running already\n", dc->bdev);
ret = -EBUSY;
goto out;
}
@@ -1082,7 +1080,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
closure_sync(&cl);
}
- add_disk(d->disk);
+ ret = add_disk(d->disk);
+ if (ret)
+ goto out;
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/*
* won't show up in the uevent file, use udevadm monitor -e instead
@@ -1154,16 +1154,16 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
- calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ calc_cached_dev_sectors(dc->disk.c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s\n", dc->backing_dev_name);
+ pr_info("Caching disabled for %pg\n", dc->bdev);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1203,29 +1203,27 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: already attached\n", dc->bdev);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: shutting down\n", dc->bdev);
return -EINVAL;
}
if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
+ pr_err("Couldn't attach %pg: block size less than set's block size\n",
+ dc->bdev);
return -EINVAL;
}
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
- pr_err("Tried to attach %s but duplicate UUID already attached\n",
- dc->backing_dev_name);
+ pr_err("Tried to attach %pg but duplicate UUID already attached\n",
+ dc->bdev);
return -EINVAL;
}
@@ -1243,15 +1241,13 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set\n",
- dc->backing_dev_name);
+ pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID\n",
- dc->backing_dev_name);
+ pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
return -EINVAL;
}
}
@@ -1319,8 +1315,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
- pr_err("Couldn't run cached device %s\n",
- dc->backing_dev_name);
+ pr_err("Couldn't run cached device %pg\n", dc->bdev);
return ret;
}
@@ -1336,8 +1331,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
+ pr_info("Caching %pg as %s on set %pU\n",
+ dc->bdev,
dc->disk.disk->disk_name,
dc->disk.c->set_uuid);
return 0;
@@ -1461,7 +1456,6 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct cache_set *c;
int ret = -ENOMEM;
- bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
@@ -1476,7 +1470,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s\n", dc->backing_dev_name);
+ pr_info("registered backing device %pg\n", dc->bdev);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1493,7 +1487,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
- pr_notice("error %s: %s\n", dc->backing_dev_name, err);
+ pr_notice("error %pg: %s\n", dc->bdev, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1534,10 +1528,11 @@ static void flash_dev_flush(struct closure *cl)
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
+ int err = -ENOMEM;
struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ goto err_ret;
closure_init(&d->cl, NULL);
set_closure_fn(&d->cl, flash_dev_flush, system_wq);
@@ -1551,9 +1546,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
- add_disk(d->disk);
+ err = add_disk(d->disk);
+ if (err)
+ goto err;
- if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
+ err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
+ if (err)
goto err;
bcache_device_link(d, c, "volume");
@@ -1567,7 +1565,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
return 0;
err:
kobject_put(&d->kobj);
- return -ENOMEM;
+err_ret:
+ return err;
}
static int flash_devs_run(struct cache_set *c)
@@ -1621,8 +1620,8 @@ bool bch_cached_dev_error(struct cached_dev *dc)
/* make others know io_disable is true earlier */
smp_mb();
- pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, dc->backing_dev_name);
+ pr_err("stop %s: too many IO errors on backing device %pg\n",
+ dc->disk.disk->disk_name, dc->bdev);
bcache_device_stop(&dc->disk);
return true;
@@ -2338,7 +2337,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2348,7 +2347,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
const char *err = NULL; /* must be set for any error case */
int ret = 0;
- bdevname(bdev, ca->cache_dev_name);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -2390,14 +2388,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %s\n", ca->cache_dev_name);
+ pr_info("registered cache device %pg\n", ca->bdev);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2617,8 +2615,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
- if (!dc)
+ if (!dc) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_disk, bdev, dc);
@@ -2629,11 +2630,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca)
+ if (!ca) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ ret = register_cache(sb, sb_disk, bdev, ca);
+ if (ret)
goto out_free_sb;
}
@@ -2750,7 +2755,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
* The reason bch_register_lock is not held to call
* bch_cache_set_stop() and bcache_device_stop() is to
* avoid potential deadlock during reboot, because cache
- * set or bcache device stopping process will acqurie
+ * set or bcache device stopping process will acquire
* bch_register_lock too.
*
* We are safe here because bcache_is_reboot sets to
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 05ac1d6fbbf3..1f0dce30fa75 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -271,7 +271,7 @@ SHOW(__bch_cached_dev)
}
if (attr == &sysfs_backing_dev_name) {
- snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
+ snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
strcat(buf, "\n");
return strlen(buf);
}
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 215df32f567b..c1752ba2e05b 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -51,13 +51,27 @@ STORE(fn) \
#define sysfs_printf(file, fmt, ...) \
do { \
if (attr == &sysfs_ ## file) \
- return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \
+ return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \
} while (0)
#define sysfs_print(file, var) \
do { \
if (attr == &sysfs_ ## file) \
- return snprint(buf, PAGE_SIZE, var); \
+ return sysfs_emit(buf, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned int) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned long)\
+ ? "%lu\n" : \
+ __builtin_types_compatible_p(typeof(var), int64_t) \
+ ? "%lli\n" : \
+ __builtin_types_compatible_p(typeof(var), uint64_t) \
+ ? "%llu\n" : \
+ __builtin_types_compatible_p(typeof(var), const char *) \
+ ? "%s\n" : "%i\n", var); \
} while (0)
#define sysfs_hprint(file, val) \
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index b64460a76267..6f3cb7c92130 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -340,23 +340,6 @@ static inline int bch_strtoul_h(const char *cp, long *res)
_r; \
})
-#define snprint(buf, size, var) \
- snprintf(buf, size, \
- __builtin_types_compatible_p(typeof(var), int) \
- ? "%i\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned int) \
- ? "%u\n" : \
- __builtin_types_compatible_p(typeof(var), long) \
- ? "%li\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned long)\
- ? "%lu\n" : \
- __builtin_types_compatible_p(typeof(var), int64_t) \
- ? "%lli\n" : \
- __builtin_types_compatible_p(typeof(var), uint64_t) \
- ? "%llu\n" : \
- __builtin_types_compatible_p(typeof(var), const char *) \
- ? "%s\n" : "%i\n", var)
-
ssize_t bch_hprint(char *buf, int64_t v);
bool bch_is_zero(const char *p, size_t n);
@@ -548,14 +531,6 @@ static inline uint64_t bch_crc64(const void *p, size_t len)
return crc ^ 0xffffffffffffffffULL;
}
-static inline uint64_t bch_crc64_update(uint64_t crc,
- const void *p,
- size_t len)
-{
- crc = crc64_be(crc, p, len);
- return crc;
-}
-
/*
* A stepwise-linear pseudo-exponential. This returns 1 << (x >>
* frac_bits), with the less-significant bits filled in by linear
@@ -584,8 +559,4 @@ static inline unsigned int fract_exp_two(unsigned int x,
void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
-static inline sector_t bdev_sectors(struct block_device *bdev)
-{
- return bdev->bd_inode->i_size >> 9;
-}
#endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8120da278161..c7560f66dca8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -45,7 +45,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* backing volume uses about 2% of the cache for dirty data.
*/
uint32_t bdev_share =
- div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
c->cached_dev_sectors);
uint64_t cache_dirty_target =
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index a3b71350eec8..745e3ab4aa0a 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -8,6 +8,7 @@
#define DM_BIO_RECORD_H
#include <linux/bio.h>
+#include <linux/blk-integrity.h>
/*
* There are lots of mutable fields in the bio struct that get
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 50f3e673729c..104ebc1f08dc 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1525,7 +1525,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
- sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t s = bdev_nr_sectors(c->bdev);
if (s >= c->start)
s -= c->start;
else
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 89a73204dbf4..2874f222c313 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -334,7 +334,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
/* FIXME: see if we can lose the max sectors limit */
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bdd500447dea..447d030036d1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1940,7 +1940,7 @@ static void cache_dtr(struct dm_target *ti)
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*----------------------------------------------------------------*/
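[Editorial note] The device-size conversions in the hunks above and below all follow one pattern: the open-coded inode-size read is replaced by the block-layer helpers. A minimal sketch of both forms, assuming only a valid struct block_device pointer (header placement of the helpers varies slightly by kernel version):

	#include <linux/blkdev.h>	/* bdev_nr_sectors(), bdev_nr_bytes() */

	/* Illustrative only: mirrors the replacements made in this series. */
	static sector_t example_dev_sectors(struct block_device *bdev)
	{
		/* old form: i_size_read(bdev->bd_inode) >> SECTOR_SHIFT */
		return bdev_nr_sectors(bdev);	/* size in 512-byte sectors */
	}

	static loff_t example_dev_bytes(struct block_device *bdev)
	{
		/* old form: i_size_read(bdev->bd_inode) */
		return bdev_nr_bytes(bdev);	/* size in bytes */
	}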
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index edd22e4d65df..4599632d7a84 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1514,7 +1514,7 @@ error:
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*---------------------------------------------------------------------------*/
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 55dccdfbcb22..b855fef4f38a 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,7 +13,7 @@
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <trace/events/block.h>
@@ -200,7 +200,7 @@ struct dm_table {
struct dm_md_mempools *mempools;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
#endif
};
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 916b7da16de2..292f7896f733 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -15,6 +15,7 @@
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 3163e2b1418e..03672204b0e3 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -415,7 +415,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct dust_device *dd = ti->private;
- sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t size = bdev_nr_sectors(dd->dev->bdev);
bool invalid_msg = false;
int r = -EINVAL;
unsigned long long tmp, block;
@@ -544,8 +544,7 @@ static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (dd->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index d25989660a76..7ce5d509b940 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -416,7 +416,7 @@ static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
* Only pass ioctls through if the device sizes match exactly.
*/
*bdev = dev->bdev;
- return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
}
static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2a78f6874143..1f6bf152b3c7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1681,7 +1681,7 @@ static int era_message(struct dm_target *ti, unsigned argc, char **argv,
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
static int era_iterate_devices(struct dm_target *ti,
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 3f4139ac1f60..b5f20eba3641 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,7 +168,7 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 4b94ffe6f2d4..345229d7e59c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -456,8 +456,7 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (fc->start ||
- ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 2c5edfbd7711..957999998d70 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -12,6 +12,7 @@
#include "dm-ima.h"
#include <linux/ima.h>
+#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <crypto/hash_info.h>
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dc03b70f6e65..d0f788e72abf 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4113,11 +4113,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
if (!ic->meta_dev)
ic->meta_device_sectors = ic->data_device_sectors;
else
- ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
@@ -4367,7 +4367,7 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 679b4c0a2eea..66ba16713f69 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -135,8 +135,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (lc->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d93a4db23512..46de085a9670 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -446,7 +446,7 @@ static int log_super(struct log_writes_c *lc)
static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
- return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(lc->logdev->bdev);
}
static int log_writes_kthread(void *arg)
@@ -851,7 +851,7 @@ static int log_writes_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 1ecf75ef276a..06f328928a7f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -447,7 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
bdev_logical_block_size(lc->header_location.
bdev));
- if (buf_size > i_size_read(dev->bdev->bd_inode)) {
+ if (buf_size > bdev_nr_bytes(dev->bdev)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 694aaca4eea2..90dc9cc48881 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -530,7 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
bdev = pgpath->path.dev->bdev;
q = bdev_get_queue(bdev);
- clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+ clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
@@ -579,7 +579,7 @@ static void multipath_release_clone(struct request *clone,
clone->io_start_time_ns);
}
- blk_put_request(clone);
+ blk_mq_free_request(clone);
}
/*
@@ -2061,7 +2061,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (!r && ti->len != bdev_nr_sectors((*bdev)))
return 1;
return r;
}
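[Editorial note] For reference, a hedged sketch of the allocate/free pattern the dm-mpath hunks switch to; the helper names are made up for illustration and error handling is abbreviated:

	#include <linux/blk-mq.h>
	#include <linux/err.h>

	/* Allocate a pass-through clone the way multipath_clone_and_map() now does. */
	static struct request *example_clone_request(struct request_queue *q,
						     struct request *rq)
	{
		struct request *clone;

		clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
					     BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(clone))
			return clone;	/* EBUSY, ENODEV or EWOULDBLOCK: caller requeues */
		return clone;
	}

	static void example_release_clone(struct request *clone)
	{
		blk_mq_free_request(clone);	/* replaces blk_put_request() */
	}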
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 1856a1b125cc..875bca30a0dd 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,6 +27,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index d9ef52159a22..2b26435a6946 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1261,7 +1261,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
md_rdev_init(jdev);
jdev->mddev = &rs->md;
jdev->bdev = rs->journal_dev.dev->bdev;
- jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+ jdev->sectors = bdev_nr_sectors(jdev->bdev);
if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
rs->ti->error = "No space for raid4/5/6 journal";
return -ENOSPC;
@@ -1607,7 +1607,7 @@ static int _check_data_dev_sectors(struct raid_set *rs)
rdev_for_each(rdev, &rs->md)
if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
- ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ ds = min(ds, bdev_nr_sectors(rdev->bdev));
if (ds < rs->md.dev_sectors) {
rs->ti->error = "Component device(s) too small";
return -EINVAL;
@@ -2662,7 +2662,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
+ bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a896dea9750e..579ab6183d4d 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -7,7 +7,6 @@
#include "dm-core.h"
#include "dm-rq.h"
-#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 028a92ff6d57..534dc2ca8bb0 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -529,7 +529,7 @@ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len + sctx->path_list[path_nr].start !=
- i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2111daaacaba..bcddc5effd15 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -169,7 +170,7 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
}
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t);
+static void dm_table_destroy_crypto_profile(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{
@@ -199,7 +200,7 @@ void dm_table_destroy(struct dm_table *t)
dm_free_md_mempools(t->mempools);
- dm_table_destroy_keyslot_manager(t);
+ dm_table_destroy_crypto_profile(t);
kfree(t);
}
@@ -226,8 +227,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
- sector_t dev_size =
- i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t dev_size = bdev_nr_sectors(bdev);
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
@@ -1186,8 +1186,8 @@ static int dm_table_register_integrity(struct dm_table *t)
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-struct dm_keyslot_manager {
- struct blk_keyslot_manager ksm;
+struct dm_crypto_profile {
+ struct blk_crypto_profile profile;
struct mapped_device *md;
};
@@ -1213,13 +1213,11 @@ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
-static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int dm_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key, unsigned int slot)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
- struct mapped_device *md = dksm->md;
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
@@ -1239,150 +1237,148 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
return args.err;
}
-static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
- .keyslot_evict = dm_keyslot_evict,
-};
-
-static int device_intersect_crypto_modes(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
+static int
+device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct blk_keyslot_manager *parent = data;
- struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+ struct blk_crypto_profile *parent = data;
+ struct blk_crypto_profile *child =
+ bdev_get_queue(dev->bdev)->crypto_profile;
- blk_ksm_intersect_modes(parent, child);
+ blk_crypto_intersect_capabilities(parent, child);
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
+ struct dm_crypto_profile *dmcp = container_of(profile,
+ struct dm_crypto_profile,
+ profile);
- if (!ksm)
+ if (!profile)
return;
- blk_ksm_destroy(ksm);
- kfree(dksm);
+ blk_crypto_profile_destroy(profile);
+ kfree(dmcp);
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
- dm_destroy_keyslot_manager(t->ksm);
- t->ksm = NULL;
+ dm_destroy_crypto_profile(t->crypto_profile);
+ t->crypto_profile = NULL;
}
/*
- * Constructs and initializes t->ksm with a keyslot manager that
- * represents the common set of crypto capabilities of the devices
- * described by the dm_table. However, if the constructed keyslot
- * manager does not support a superset of the crypto capabilities
- * supported by the current keyslot manager of the mapped_device,
- * it returns an error instead, since we don't support restricting
- * crypto capabilities on table changes. Finally, if the constructed
- * keyslot manager doesn't actually support any crypto modes at all,
- * it just returns NULL.
+ * Constructs and initializes t->crypto_profile with a crypto profile that
+ * represents the common set of crypto capabilities of the devices described by
+ * the dm_table. However, if the constructed crypto profile doesn't support all
+ * crypto capabilities that are supported by the current mapped_device, it
+ * returns an error instead, since we don't support removing crypto capabilities
+ * on table changes. Finally, if the constructed crypto profile is "empty" (has
+ * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
*/
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
- struct dm_keyslot_manager *dksm;
- struct blk_keyslot_manager *ksm;
+ struct dm_crypto_profile *dmcp;
+ struct blk_crypto_profile *profile;
struct dm_target *ti;
unsigned int i;
- bool ksm_is_empty = true;
+ bool empty_profile = true;
- dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
- if (!dksm)
+ dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
+ if (!dmcp)
return -ENOMEM;
- dksm->md = t->md;
+ dmcp->md = t->md;
- ksm = &dksm->ksm;
- blk_ksm_init_passthrough(ksm);
- ksm->ksm_ll_ops = dm_ksm_ll_ops;
- ksm->max_dun_bytes_supported = UINT_MAX;
- memset(ksm->crypto_modes_supported, 0xFF,
- sizeof(ksm->crypto_modes_supported));
+ profile = &dmcp->profile;
+ blk_crypto_profile_init(profile, 0);
+ profile->ll_ops.keyslot_evict = dm_keyslot_evict;
+ profile->max_dun_bytes_supported = UINT_MAX;
+ memset(profile->modes_supported, 0xFF,
+ sizeof(profile->modes_supported));
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
- blk_ksm_intersect_modes(ksm, NULL);
+ blk_crypto_intersect_capabilities(profile, NULL);
break;
}
if (!ti->type->iterate_devices)
continue;
- ti->type->iterate_devices(ti, device_intersect_crypto_modes,
- ksm);
+ ti->type->iterate_devices(ti,
+ device_intersect_crypto_capabilities,
+ profile);
}
- if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
+ if (t->md->queue &&
+ !blk_crypto_has_capabilities(profile,
+ t->md->queue->crypto_profile)) {
DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
- dm_destroy_keyslot_manager(ksm);
+ dm_destroy_crypto_profile(profile);
return -EINVAL;
}
/*
- * If the new KSM doesn't actually support any crypto modes, we may as
- * well represent it with a NULL ksm.
+ * If the new profile doesn't actually support any crypto capabilities,
+ * we may as well represent it with a NULL profile.
*/
- ksm_is_empty = true;
- for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
- if (ksm->crypto_modes_supported[i]) {
- ksm_is_empty = false;
+ for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
+ if (profile->modes_supported[i]) {
+ empty_profile = false;
break;
}
}
- if (ksm_is_empty) {
- dm_destroy_keyslot_manager(ksm);
- ksm = NULL;
+ if (empty_profile) {
+ dm_destroy_crypto_profile(profile);
+ profile = NULL;
}
/*
- * t->ksm is only set temporarily while the table is being set
- * up, and it gets set to NULL after the capabilities have
- * been transferred to the request_queue.
+ * t->crypto_profile is only set temporarily while the table is being
+ * set up, and it gets set to NULL after the profile has been
+ * transferred to the request_queue.
*/
- t->ksm = ksm;
+ t->crypto_profile = profile;
return 0;
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
- if (!t->ksm)
+ if (!t->crypto_profile)
return;
- /* Make the ksm less restrictive */
- if (!q->ksm) {
- blk_ksm_register(t->ksm, q);
+ /* Make the crypto profile less restrictive. */
+ if (!q->crypto_profile) {
+ blk_crypto_register(t->crypto_profile, q);
} else {
- blk_ksm_update_capabilities(q->ksm, t->ksm);
- dm_destroy_keyslot_manager(t->ksm);
+ blk_crypto_update_capabilities(q->crypto_profile,
+ t->crypto_profile);
+ dm_destroy_crypto_profile(t->crypto_profile);
}
- t->ksm = NULL;
+ t->crypto_profile = NULL;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
}
@@ -1414,9 +1410,9 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_construct_keyslot_manager(t);
+ r = dm_table_construct_crypto_profile(t);
if (r) {
- DMERR("could not construct keyslot manager.");
+ DMERR("could not construct crypto profile.");
return r;
}
@@ -2070,7 +2066,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return r;
}
- dm_update_keyslot_manager(q, t);
+ dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
return 0;
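[Editorial note] As a quick reference for the keyslot-manager to crypto-profile rename above, this is the shape of the pass-through profile setup that dm_table_construct_crypto_profile() now performs; surrounding DM state is omitted and the function name is illustrative:

	#include <linux/blk-crypto-profile.h>
	#include <linux/string.h>
	#include <linux/kernel.h>

	static void example_init_passthrough_profile(struct blk_crypto_profile *profile,
						     int (*evict)(struct blk_crypto_profile *,
								  const struct blk_crypto_key *,
								  unsigned int))
	{
		/* 0 keyslots: a pass-through profile, as DM uses here */
		blk_crypto_profile_init(profile, 0);
		profile->ll_ops.keyslot_evict = evict;
		profile->max_dun_bytes_supported = UINT_MAX;
		/* start from "everything supported", then intersect per underlying device */
		memset(profile->modes_supported, 0xFF,
		       sizeof(profile->modes_supported));
	}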
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c88ed14d49e6..1a96a07cbf44 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -549,7 +549,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
int r;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
if (bdev_size > THIN_METADATA_MAX_SECTORS)
bdev_size = THIN_METADATA_MAX_SECTORS;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4c67b77c23c1..ec119d2422d5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3212,7 +3212,7 @@ static int metadata_pre_commit_callback(void *context)
static sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static void warn_if_metadata_device_too_big(struct block_device *bdev)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 88288c8d6bc8..a7efe83aad29 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -18,6 +18,7 @@
#include "dm-verity-verify-sig.h"
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/scatterlist.h>
#define DM_MSG_PREFIX "verity"
@@ -833,8 +834,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
*bdev = v->data_dev->bdev;
- if (v->data_start ||
- ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 18320444fb0a..017806096b91 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -2341,7 +2341,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Cache data device lookup failed";
goto bad;
}
- wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
+ wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
/*
* Parse the cache block size
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ae1bc48c0043..8dc21c09329f 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -733,7 +733,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ dev->capacity = bdev_nr_sectors(bdev);
if (ti->begin) {
ti->error = "Partial mapping is not supported";
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 76d9da49fda7..63aa52263658 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -29,7 +29,7 @@
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#define DM_MSG_PREFIX "core"
@@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static blk_qc_t __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
- blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
- ret = submit_bio_noacct(clone);
+ submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
@@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-
- return ret;
}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
@@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
-static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target_io *tio, unsigned *len)
{
struct bio *clone = &tio->clone;
@@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
__bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, *len);
-
- return __map_bio(tio);
+ __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
@@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
while ((bio = bio_list_pop(&blist))) {
tio = container_of(bio, struct dm_target_io, clone);
- (void) __clone_and_map_simple_bio(ci, tio, len);
+ __clone_and_map_simple_bio(ci, tio, len);
}
}
@@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
free_tio(tio);
return r;
}
- (void) __map_bio(tio);
+ __map_bio(tio);
return 0;
}
@@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+static void __split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
@@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
+ submit_bio_noacct(bio);
}
}
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
}
-static blk_qc_t dm_submit_bio(struct bio *bio)
+static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
- ret = __split_and_process_bio(md, map, bio);
+ __split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
- return ret;
}
/*-----------------------------------------------------------------
@@ -1671,14 +1663,14 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
- dm_destroy_keyslot_manager(q->ksm);
+ dm_destroy_crypto_profile(q->crypto_profile);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
@@ -1704,7 +1696,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
dm_sysfs_exit(md);
del_gendisk(md->disk);
}
- dm_queue_destroy_keyslot_manager(md->queue);
+ dm_queue_destroy_crypto_profile(md->queue);
blk_cleanup_disk(md->disk);
}
@@ -2086,7 +2078,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (r)
return r;
- add_disk(md->disk);
+ r = add_disk(md->disk);
+ if (r)
+ return r;
r = dm_sysfs_init(md);
if (r) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6c0c3d0d905a..5111ed966947 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -41,6 +41,7 @@
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
@@ -51,6 +52,7 @@
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
+#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
@@ -352,7 +354,7 @@ static bool create_on_open = true;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
-void md_new_event(struct mddev *mddev)
+void md_new_event(void)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
@@ -441,19 +443,19 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_submit_bio(struct bio *bio)
+static void md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
blk_queue_split(&bio);
@@ -462,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio)
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
md_handle_request(mddev, bio);
-
- return BLK_QC_T_NONE;
}
/* mddev_suspend makes sure no new requests are submitted
@@ -888,8 +888,7 @@ static struct md_personality *find_pers(int level, char *clevel)
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
- sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
- return MD_NEW_SIZE_SECTORS(num_sectors);
+ return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}
static int alloc_disk_sb(struct md_rdev *rdev)
@@ -1631,8 +1630,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
switch(minor_version) {
case 0:
- sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
- sb_start -= 8*2;
+ sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
@@ -1787,10 +1785,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
else
ret = 0;
}
- if (minor_version) {
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
- sectors -= rdev->data_offset;
- } else
+ if (minor_version)
+ sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
+ else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
@@ -2168,8 +2165,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
- max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
- max_sectors -= rdev->data_offset;
+ max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
@@ -2178,7 +2174,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
} else {
/* minor version 0; superblock after data */
sector_t sb_start, bm_space;
- sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+ sector_t dev_size = bdev_nr_sectors(rdev->bdev);
/* 8K is for superblock */
sb_start = dev_size - 8*2;
@@ -2886,7 +2882,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_new_event(mddev);
+ md_new_event();
md_wakeup_thread(mddev->thread);
return 0;
}
@@ -2976,7 +2972,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* -write_error - clears WriteErrorSeen
* {,-}failfast - set/clear FailFast
*/
+
+ struct mddev *mddev = rdev->mddev;
int err = -EINVAL;
+ bool need_update_sb = false;
+
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
@@ -2991,7 +2991,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
- struct mddev *mddev = rdev->mddev;
err = 0;
if (mddev_is_clustered(mddev))
err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -3002,16 +3001,18 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
- md_new_event(mddev);
+ md_new_event();
}
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
mddev_create_serial_pool(rdev->mddev, rdev, false);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
@@ -3037,9 +3038,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "failfast")) {
set_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-failfast")) {
clear_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
@@ -3118,6 +3121,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(ExternalBbl, &rdev->flags);
err = 0;
}
+ if (need_update_sb)
+ md_update_sb(mddev, 1);
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
@@ -3382,7 +3387,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
if (!sectors)
return -EBUSY;
} else if (!sectors)
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+ sectors = bdev_nr_sectors(rdev->bdev) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
@@ -3709,7 +3714,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
kobject_init(&rdev->kobj, &rdev_ktype);
- size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
+ size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
if (!size) {
pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
@@ -4099,7 +4104,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify_dirent_safe(mddev->sysfs_level);
- md_new_event(mddev);
+ md_new_event();
rv = len;
out_unlock:
mddev_unlock(mddev);
@@ -4620,7 +4625,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
export_rdev(rdev);
mddev_unlock(mddev);
if (!err)
- md_new_event(mddev);
+ md_new_event();
return err ? err : len;
}
@@ -5490,6 +5495,10 @@ static struct attribute *md_default_attrs[] = {
NULL,
};
+static const struct attribute_group md_default_group = {
+ .attrs = md_default_attrs,
+};
+
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
@@ -5512,6 +5521,12 @@ static const struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
+static const struct attribute_group *md_attr_groups[] = {
+ &md_default_group,
+ &md_bitmap_group,
+ NULL,
+};
+
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -5587,7 +5602,7 @@ static const struct sysfs_ops md_sysfs_ops = {
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
- .default_attrs = md_default_attrs,
+ .default_groups = md_attr_groups,
};
int mdp_major = 0;
@@ -5596,7 +5611,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5663,7 +5677,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto abort;
+ goto out_unlock_disks_mutex;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5676,7 +5690,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto abort;
+ goto out_unlock_disks_mutex;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5700,27 +5714,25 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- add_disk(disk);
+ error = add_disk(disk);
+ if (error)
+ goto out_cleanup_disk;
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error) {
- /* This isn't possible, but as kobject_init_and_add is marked
- * __must_check, we must do something with the result
- */
- pr_debug("md: cannot register %s/md - name in use\n",
- disk->disk_name);
- error = 0;
- }
- if (mddev->kobj.sd &&
- sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- pr_debug("pointless warning\n");
- abort:
+ if (error)
+ goto out_del_gendisk;
+
+ kobject_uevent(&mddev->kobj, KOBJ_ADD);
+ mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
+ goto out_unlock_disks_mutex;
+
+out_del_gendisk:
+ del_gendisk(disk);
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out_unlock_disks_mutex:
mutex_unlock(&disks_mutex);
- if (!error && mddev->kobj.sd) {
- kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
- mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- }
mddev_put(mddev);
return error;
}
@@ -6034,7 +6046,7 @@ int md_run(struct mddev *mddev)
if (mddev->sb_flags)
md_update_sb(mddev, 0);
- md_new_event(mddev);
+ md_new_event();
return 0;
bitmap_abort:
@@ -6424,7 +6436,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
- md_new_event(mddev);
+ md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -6880,7 +6892,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (!mddev->persistent) {
pr_debug("md: nonpersistent superblock ...\n");
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
@@ -6928,7 +6940,7 @@ kick_rdev:
md_wakeup_thread(mddev->thread);
else
md_update_sb(mddev, 1);
- md_new_event(mddev);
+ md_new_event();
return 0;
busy:
@@ -6967,7 +6979,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
rdev->sectors = rdev->sb_start;
@@ -7001,7 +7013,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort_export:
@@ -7975,7 +7987,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
- md_new_event(mddev);
+ md_new_event();
}
EXPORT_SYMBOL(md_error);
@@ -8859,7 +8871,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
- md_new_event(mddev);
+ md_new_event();
update_time = jiffies;
blk_start_plug(&plug);
@@ -8930,7 +8942,7 @@ void md_do_sync(struct md_thread *thread)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
- md_new_event(mddev);
+ md_new_event();
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -9154,7 +9166,7 @@ static int remove_and_add_spares(struct mddev *mddev,
sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
- md_new_event(mddev);
+ md_new_event();
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9188,7 +9200,7 @@ static void md_start_sync(struct work_struct *ws)
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
}
/*
@@ -9447,7 +9459,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
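[Editorial note] Both the dm and md hunks above now check the return value of add_disk() and unwind on failure; a minimal sketch of that pattern, with a hypothetical helper name:

	#include <linux/genhd.h>

	static int example_register_disk(struct gendisk *disk)
	{
		int err;

		err = add_disk(disk);	/* no longer assumed to succeed */
		if (err) {
			blk_cleanup_disk(disk);	/* as md_alloc()'s out_cleanup_disk label does */
			return err;
		}
		return 0;
	}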
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4c96c36bd01a..53ea7a6961de 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,7 +731,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
-extern void md_new_event(struct mddev *mddev);
+extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 19598bd38939..7dc8026cf6ee 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1496,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!r1_bio->bios[i])
continue;
- if (first_clone) {
+ if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
@@ -1529,13 +1529,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = mbio;
- mbio->bi_iter.bi_sector = (r1_bio->sector +
- conf->mirrors[i].rdev->data_offset);
- bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
+ mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
+ bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
- if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
- !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ if (test_bit(FailFast, &rdev->flags) &&
+ !test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
@@ -1546,7 +1545,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index aa2636582841..dde98f65bd04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4647,7 +4647,7 @@ out:
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 02ed53b20654..9c1a5877cf9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,10 +7732,7 @@ static int raid5_run(struct mddev *mddev)
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
- /* Round up to power of 2, as discard handling
- * currently assumes that */
- while ((stripe-1) & stripe)
- stripe = (stripe | (stripe-1)) + 1;
+ stripe = roundup_pow_of_two(stripe);
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
@@ -8282,7 +8279,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
}
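[Editorial note] The raid5 hunk above replaces the hand-rolled round-up loop with the generic helper; for a non-zero stripe size the two forms are equivalent (a value already a power of two is left unchanged), e.g.:

	#include <linux/log2.h>

	/* e.g. stripe = 3 * 4096 = 12288 -> 16384 with either the old loop or this */
	static unsigned long example_round_stripe(unsigned long stripe)
	{
		return roundup_pow_of_two(stripe);
	}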
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index 9ba3a00dce31..94ef3349b8d6 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -8,6 +8,8 @@ config CEC_NOTIFIER
config CEC_PIN
bool
+menu "CEC support"
+
config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
@@ -37,3 +39,5 @@ source "drivers/media/cec/i2c/Kconfig"
source "drivers/media/cec/platform/Kconfig"
source "drivers/media/cec/usb/Kconfig"
endif
+
+endmenu
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 8c613aa649c6..a60b6f03a6a1 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -957,7 +957,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
* so we can kick off the pending transmit.
*/
delta = ktime_us_delta(ts, pin->ts);
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time) {
pin->tx_nacked = false;
if (tx_custom_start(pin))
@@ -968,7 +968,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
cec_pin_low(pin);
break;
}
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time - 1)
pin->state = CEC_ST_TX_WAIT;
break;
diff --git a/drivers/media/cec/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c
index 891533060d49..68fe6d6a8178 100644
--- a/drivers/media/cec/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c
@@ -633,7 +633,6 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
{
struct meson_ao_cec_g12a_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
void __iomem *base;
int ret, irq;
@@ -664,8 +663,7 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_probe_adapter;
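[Editorial note] The CEC probe conversions in this series all drop the intermediate struct resource; a sketch of the single-call form, assuming a platform_device whose first MEM resource is the register window:

	#include <linux/platform_device.h>
	#include <linux/err.h>

	static int example_probe_map(struct platform_device *pdev, void __iomem **base)
	{
		/* wraps platform_get_resource(pdev, IORESOURCE_MEM, 0) + devm_ioremap_resource() */
		*base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(*base))
			return PTR_ERR(*base);
		return 0;
	}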
diff --git a/drivers/media/cec/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c
index 09aff82c3773..6b440f0635d9 100644
--- a/drivers/media/cec/platform/meson/ao-cec.c
+++ b/drivers/media/cec/platform/meson/ao-cec.c
@@ -602,7 +602,6 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
int ret, irq;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
@@ -626,8 +625,7 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ ao_cec->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
goto out_probe_adapter;
diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 028a09a7531e..ce9a9d922f11 100644
--- a/drivers/media/cec/platform/s5p/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
@@ -178,7 +178,6 @@ static int s5p_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
- struct resource *res;
struct s5p_cec_dev *cec;
bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
@@ -212,8 +211,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
if (IS_ERR(cec->pmu))
return -EPROBE_DEFER;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->reg = devm_ioremap_resource(dev, res);
+ cec->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index f0c73e64b586..abf8e8bcbb34 100644
--- a/drivers/media/cec/platform/sti/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
@@ -299,7 +299,6 @@ static const struct cec_adap_ops sti_cec_adap_ops = {
static int stih_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct stih_cec *cec;
struct device *hdmi_dev;
int ret;
@@ -315,8 +314,7 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
+ cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs))
return PTR_ERR(cec->regs);
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index 0ffd89712536..40db7911b437 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -255,7 +255,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
static int stm32_cec_probe(struct platform_device *pdev)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL;
- struct resource *res;
struct stm32_cec *cec;
void __iomem *mmio;
int ret;
@@ -266,8 +265,7 @@ static int stm32_cec_probe(struct platform_device *pdev)
cec->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index bceaf91faa15..7d4bc2733f2b 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -414,10 +414,10 @@ struct smscore_registry_entry_t {
static struct list_head g_smscore_notifyees;
static struct list_head g_smscore_devices;
-static struct mutex g_smscore_deviceslock;
+static DEFINE_MUTEX(g_smscore_deviceslock);
static struct list_head g_smscore_registry;
-static struct mutex g_smscore_registrylock;
+static DEFINE_MUTEX(g_smscore_registrylock);
static int default_mode = DEVICE_MODE_NONE;
@@ -2119,10 +2119,7 @@ static int __init smscore_module_init(void)
{
INIT_LIST_HEAD(&g_smscore_notifyees);
INIT_LIST_HEAD(&g_smscore_devices);
- mutex_init(&g_smscore_deviceslock);
-
INIT_LIST_HEAD(&g_smscore_registry);
- mutex_init(&g_smscore_registrylock);
return 0;
}
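[Editorial note] The siano change swaps runtime mutex_init() calls for static initialization; the two forms, with illustrative names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);	/* initialized at compile time */

	/* equivalent runtime form, as the removed smscore_module_init() code did:
	 *	static struct mutex example_lock;
	 *	mutex_init(&example_lock);
	 */
	static void example_use(void)
	{
		mutex_lock(&example_lock);
		/* ... critical section ... */
		mutex_unlock(&example_lock);
	}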
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 508ac295eb06..2266bbd239ab 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -68,13 +68,13 @@ module_param(debug, int, 0644);
err; \
})
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
({ \
struct vb2_queue *_q = (vb)->vb2_queue; \
void *ptr; \
\
log_memop(vb, op); \
- ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \
if (!IS_ERR_OR_NULL(ptr)) \
(vb)->cnt_mem_ ## op++; \
ptr; \
@@ -144,9 +144,9 @@ module_param(debug, int, 0644);
((vb)->vb2_queue->mem_ops->op ? \
(vb)->vb2_queue->mem_ops->op(args) : 0)
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
((vb)->vb2_queue->mem_ops->op ? \
- (vb)->vb2_queue->mem_ops->op(args) : NULL)
+ (vb)->vb2_queue->mem_ops->op(vb, args) : NULL)
#define call_void_memop(vb, op, args...) \
do { \
@@ -230,9 +230,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
if (size < vb->planes[plane].length)
goto free;
- mem_priv = call_ptr_memop(vb, alloc,
- q->alloc_devs[plane] ? : q->dev,
- q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+ mem_priv = call_ptr_memop(alloc,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ size);
if (IS_ERR_OR_NULL(mem_priv)) {
if (mem_priv)
ret = PTR_ERR(mem_priv);
@@ -326,12 +327,9 @@ static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
if (vb->synced)
return;
- if (vb->need_cache_sync_on_prepare) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, prepare,
- vb->planes[plane].mem_priv);
- }
vb->synced = 1;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}
/*
@@ -345,12 +343,9 @@ static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
if (!vb->synced)
return;
- if (vb->need_cache_sync_on_finish) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, finish,
- vb->planes[plane].mem_priv);
- }
vb->synced = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}
/*
@@ -381,6 +376,27 @@ static void __setup_offsets(struct vb2_buffer *vb)
}
}
+static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ /*
+ * DMA exporter should take care of cache syncs, so we can avoid
+ * explicit ->prepare()/->finish() syncs. For other ->memory types
+ * we always need ->prepare() and/or ->finish() cache sync.
+ */
+ if (q->memory == VB2_MEMORY_DMABUF) {
+ vb->skip_cache_sync_on_finish = 1;
+ vb->skip_cache_sync_on_prepare = 1;
+ return;
+ }
+
+ /*
+ * ->finish() cache sync can be avoided when queue direction is
+ * TO_DEVICE.
+ */
+ if (q->dma_dir == DMA_TO_DEVICE)
+ vb->skip_cache_sync_on_finish = 1;
+}
+
/*
* __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
@@ -414,17 +430,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
- /*
- * We need to set these flags here so that the videobuf2 core
- * will call ->prepare()/->finish() cache sync/flush on vb2
- * buffers when appropriate. However, we can avoid explicit
- * ->prepare() and ->finish() cache sync for DMABUF buffers,
- * because DMA exporter takes care of it.
- */
- if (q->memory != VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
- }
+ init_buffer_cache_hints(q, vb);
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
@@ -732,11 +738,30 @@ int vb2_verify_memory_type(struct vb2_queue *q,
}
EXPORT_SYMBOL(vb2_verify_memory_type);
+static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
+{
+ q->non_coherent_mem = 0;
+
+ if (!vb2_queue_allows_cache_hints(q))
+ return;
+ q->non_coherent_mem = non_coherent_mem;
+}
+
+static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
+{
+ if (non_coherent_mem != q->non_coherent_mem) {
+ dprintk(q, 1, "memory coherency model mismatch\n");
+ return false;
+ }
+ return true;
+}
+
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count)
+ unsigned int flags, unsigned int *count)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
unsigned int i;
int ret;
@@ -751,7 +776,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
if (*count == 0 || q->num_buffers != 0 ||
- (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
+ (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
+ !verify_coherency_flags(q, non_coherent_mem)) {
/*
* We already have buffers allocated, so first check if they
* are not in use and can be freed.
@@ -788,6 +814,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
+ set_queue_coherency(q, non_coherent_mem);
/*
* Ask the driver how many buffers and planes per buffer it requires.
@@ -872,12 +899,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count,
+ unsigned int flags, unsigned int *count,
unsigned int requested_planes,
const unsigned int requested_sizes[])
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
int ret;
if (q->num_buffers == VB2_MAX_FRAME) {
@@ -893,11 +921,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
+ set_queue_coherency(q, non_coherent_mem);
} else {
if (q->memory != memory) {
dprintk(q, 1, "memory model mismatch\n");
return -EINVAL;
}
+ if (!verify_coherency_flags(q, non_coherent_mem))
+ return -EINVAL;
}
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -975,7 +1006,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -985,7 +1016,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -1125,10 +1156,11 @@ static int __prepare_userptr(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, get_userptr,
- q->alloc_devs[plane] ? : q->dev,
- planes[plane].m.userptr,
- planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(get_userptr,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].m.userptr,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
plane);
@@ -1249,9 +1281,11 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, attach_dmabuf,
- q->alloc_devs[plane] ? : q->dev,
- dbuf, planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ dbuf,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -1421,9 +1455,19 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
static void vb2_req_queue(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int err;
mutex_lock(vb->vb2_queue->lock);
- vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ /*
+ * There is no method to propagate an error from vb2_core_qbuf(),
+ * so if it returns a non-zero value, WARN.
+ *
+ * The only exception is -EIO which is returned if q->error is
+ * set. We just ignore that, and expect this will be caught the
+ * next time vb2_req_prepare() is called.
+ */
+ err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ WARN_ON_ONCE(err && err != -EIO);
mutex_unlock(vb->vb2_queue->lock);
}
@@ -2187,8 +2231,10 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
vb_plane = &vb->planes[plane];
- dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
- flags & O_ACCMODE);
+ dbuf = call_ptr_memop(get_dmabuf,
+ vb,
+ vb_plane->mem_priv,
+ flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "failed to export buffer %d, plane %d\n",
index, plane);
@@ -2342,6 +2388,17 @@ int vb2_core_queue_init(struct vb2_queue *q)
if (WARN_ON(q->requires_requests && !q->supports_requests))
return -EINVAL;
+ /*
+ * This combination is not allowed since a non-zero value of
+ * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
+ * it has to call start_streaming(), and the Request API expects
+ * that queueing a request (and thus queueing a buffer contained
+ * in that request) will always succeed. There is no method of
+ * propagating an error back to userspace.
+ */
+ if (WARN_ON(q->supports_requests && q->min_buffers_needed))
+ return -EINVAL;
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
@@ -2576,7 +2633,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
fileio->memory = VB2_MEMORY_MMAP;
fileio->type = q->type;
q->fileio = fileio;
- ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
if (ret)
goto err_kfree;
@@ -2633,7 +2690,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
err_reqbufs:
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
err_kfree:
q->fileio = NULL;
@@ -2653,7 +2710,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
vb2_core_streamoff(q, q->type);
q->fileio = NULL;
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
kfree(fileio);
dprintk(q, 3, "file io emulator closed\n");
}
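
For context, a minimal userspace-side sketch of how the new flags argument is expected to be exercised through VIDIOC_REQBUFS. This is not part of the patch: the device fd, buffer count and buffer type are placeholders, and it only illustrates that V4L2_MEMORY_FLAG_NON_COHERENT is honoured for MMAP queues which allow cache hints, while re-requesting buffers with a different coherency flag is rejected by verify_coherency_flags() until the existing buffers are freed with count = 0.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hedged sketch: request 4 non-coherent MMAP buffers on a capture queue. */
static int request_non_coherent_buffers(int fd)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	/*
	 * If the queue does not allow cache hints, validate_memory_flags()
	 * clears the flag, so check what was actually granted.
	 */
	return (req.flags & V4L2_MEMORY_FLAG_NON_COHERENT) ? 1 : 0;
}
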
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index a7f61ba85440..38767791955d 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -40,6 +41,9 @@ struct vb2_dc_buf {
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+ bool non_coherent_mem;
};
/*********************************************/
@@ -66,24 +70,46 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
/* callbacks for all buffers */
/*********************************************/
-static void *vb2_dc_cookie(void *buf_priv)
+static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
return &buf->dma_addr;
}
-static void *vb2_dc_vaddr(void *buf_priv)
+/*
+ * This function may fail if:
+ *
+ * - dma_buf_vmap() fails
+ * E.g. due to lack of virtual mapping address space, or due to
+ * dmabuf->ops misconfiguration.
+ *
+ * - dma_vmap_noncontiguous() fails
+ * For instance, when the requested buffer size is larger than totalram_pages().
+ * Relevant for buffers that use non-coherent memory.
+ *
+ * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
+ * Relevant for buffers that use coherent memory.
+ */
+static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- struct dma_buf_map map;
- int ret;
- if (!buf->vaddr && buf->db_attach) {
- ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
- buf->vaddr = ret ? NULL : map.vaddr;
+ if (buf->vaddr)
+ return buf->vaddr;
+
+ if (buf->db_attach) {
+ struct dma_buf_map map;
+
+ if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
+ buf->vaddr = map.vaddr;
+
+ return buf->vaddr;
}
+ if (buf->non_coherent_mem)
+ buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt);
return buf->vaddr;
}
@@ -99,10 +125,19 @@ static void vb2_dc_prepare(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This handles both DMABUF buffers and the user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_prepare)
return;
+ if (!buf->non_coherent_mem)
+ return;
+
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ flush_kernel_vmap_range(buf->vaddr, buf->size);
}
static void vb2_dc_finish(void *buf_priv)
@@ -110,10 +145,19 @@ static void vb2_dc_finish(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This handles both DMABUF buffers and the user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ if (!buf->non_coherent_mem)
return;
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ invalidate_kernel_vmap_range(buf->vaddr, buf->size);
}
/*********************************************/
@@ -127,21 +171,69 @@ static void vb2_dc_put(void *buf_priv)
if (!refcount_dec_and_test(&buf->refcount))
return;
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+ if (buf->non_coherent_mem) {
+ if (buf->vaddr)
+ dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
+ dma_free_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt, buf->dma_dir);
+ } else {
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_attrs(buf->dev, buf->size, buf->cookie,
+ buf->dma_addr, buf->attrs);
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->cookie = dma_alloc_attrs(buf->dev,
+ buf->size,
+ &buf->dma_addr,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->cookie)
+ return -ENOMEM;
+
+ if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return 0;
+
+ buf->vaddr = buf->cookie;
+ return 0;
+}
+
+static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
+ buf->size,
+ buf->dma_dir,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->dma_sgt)
+ return -ENOMEM;
+
+ buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
+
+ /*
+ * For non-coherent buffers the kernel mapping is created on demand
+ * in vb2_dc_vaddr().
+ */
+ return 0;
+}
+
+static void *vb2_dc_alloc(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size)
{
struct vb2_dc_buf *buf;
+ int ret;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
@@ -150,22 +242,25 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
+ buf->attrs = vb->vb2_queue->dma_attrs;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+ buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
+ buf->size = size;
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
- buf->size = size;
- buf->dma_dir = dma_dir;
+
+ if (buf->non_coherent_mem)
+ ret = vb2_dc_alloc_non_coherent(buf);
+ else
+ ret = vb2_dc_alloc_coherent(buf);
+
+ if (ret) {
+ dev_err(dev, "dma alloc of size %ld failed\n", size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dc_put;
@@ -186,9 +281,12 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+ if (buf->non_coherent_mem)
+ ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
+ buf->dma_sgt);
+ else
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
return ret;
@@ -350,9 +448,15 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
- struct vb2_dc_buf *buf = dbuf->priv;
+ struct vb2_dc_buf *buf;
+ void *vaddr;
+
+ buf = dbuf->priv;
+ vaddr = vb2_dc_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
- dma_buf_map_set_vaddr(map, buf->vaddr);
+ dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
@@ -380,6 +484,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
int ret;
struct sg_table *sgt;
+ if (buf->non_coherent_mem)
+ return buf->dma_sgt;
+
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
dev_err(buf->dev, "failed to alloc sg table\n");
@@ -397,7 +504,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -459,8 +568,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
@@ -490,7 +599,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
offset = lower_32_bits(offset_in_page(vaddr));
vec = vb2_create_framevec(vaddr, size);
@@ -555,6 +665,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_addr = sg_dma_address(sgt->sgl);
buf->dma_sgt = sgt;
+ buf->non_coherent_mem = 1;
+
out:
buf->size = size;
@@ -660,8 +772,8 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dc_buf *buf;
struct dma_buf_attachment *dba;
@@ -677,6 +789,8 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
+ buf->vb = vb;
+
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -685,7 +799,7 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
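
The non-coherent path added above boils down to the following DMA API sequence. This is a standalone, hedged sketch (device pointer, size and direction are placeholders) of roughly what vb2_dc_alloc_non_coherent(), vb2_dc_vaddr(), vb2_dc_prepare() and vb2_dc_finish() do, not a drop-in replacement for them.

#include <linux/dma-mapping.h>
#include <linux/highmem.h>

/* Hedged sketch of the non-coherent buffer lifecycle used above. */
static int demo_non_coherent_cycle(struct device *dev, size_t size)
{
	enum dma_data_direction dir = DMA_FROM_DEVICE;
	struct sg_table *sgt;
	void *vaddr;

	/* Allocation: pages may be non-contiguous in CPU address space. */
	sgt = dma_alloc_noncontiguous(dev, size, dir, GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	/* The kernel mapping is created lazily, as vb2_dc_vaddr() does. */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, dir);
		return -ENOMEM;
	}

	/* ->prepare(): hand the buffer over to the device. */
	dma_sync_sgtable_for_device(dev, sgt, dir);
	flush_kernel_vmap_range(vaddr, size);

	/* ... DMA runs here ... */

	/* ->finish(): hand the buffer back to the CPU. */
	dma_sync_sgtable_for_cpu(dev, sgt, dir);
	invalidate_kernel_vmap_range(vaddr, size);

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, dir);
	return 0;
}
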
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index c5b06a509566..33ee63a99139 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -51,6 +51,8 @@ struct vb2_dma_sg_buf {
struct vb2_vmarea_handler handler;
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -96,9 +98,8 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -113,7 +114,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
return ERR_PTR(-ENOMEM);
buf->vaddr = NULL;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = 0;
buf->size = size;
/* size is already page aligned */
@@ -130,7 +131,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
if (!buf->pages)
goto fail_pages_array_alloc;
- ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
if (ret)
goto fail_pages_alloc;
@@ -154,6 +155,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
+ buf->vb = vb;
refcount_set(&buf->refcount, 1);
@@ -202,6 +204,9 @@ static void vb2_dma_sg_prepare(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
@@ -210,12 +215,14 @@ static void vb2_dma_sg_finish(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
-static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -230,7 +237,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
buf->vaddr = NULL;
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
@@ -292,7 +299,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dma_sg_vaddr(void *buf_priv)
+static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf_map map;
@@ -511,7 +518,9 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.release = vb2_dma_sg_dmabuf_ops_release,
};
-static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -605,8 +614,8 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
@@ -630,14 +639,14 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
return buf;
}
-static void *vb2_dma_sg_cookie(void *buf_priv)
+static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 2988bb38ceb1..6edf4508c636 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -345,24 +345,6 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
struct vb2_buffer *vb,
struct v4l2_buffer *b)
{
- /*
- * DMA exporter should take care of cache syncs, so we can avoid
- * explicit ->prepare()/->finish() syncs. For other ->memory types
- * we always need ->prepare() or/and ->finish() cache sync.
- */
- if (q->memory == VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_finish = 0;
- vb->need_cache_sync_on_prepare = 0;
- return;
- }
-
- /*
- * Cache sync/invalidation flags are set by default in order to
- * preserve existing behaviour for old apps/drivers.
- */
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
-
if (!vb2_queue_allows_cache_hints(q)) {
/*
* Clear buffer cache flags if queue does not support user
@@ -374,18 +356,11 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
return;
}
- /*
- * ->finish() cache sync can be avoided when queue direction is
- * TO_DEVICE.
- */
- if (q->dma_dir == DMA_TO_DEVICE)
- vb->need_cache_sync_on_finish = 0;
-
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
- vb->need_cache_sync_on_finish = 0;
+ vb->skip_cache_sync_on_finish = 1;
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
- vb->need_cache_sync_on_prepare = 0;
+ vb->skip_cache_sync_on_prepare = 1;
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
@@ -717,12 +692,32 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
#endif
}
+static void validate_memory_flags(struct vb2_queue *q,
+ int memory,
+ u32 *flags)
+{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * Only V4L2_MEMORY_FLAG_NON_COHERENT needs to be cleared here,
+ * but to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+}
+
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ u32 flags = req->flags;
fill_buf_caps(q, &req->capabilities);
- return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
+ validate_memory_flags(q, req->memory, &flags);
+ req->flags = flags;
+ return ret ? ret : vb2_core_reqbufs(q, req->memory,
+ req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
@@ -754,6 +749,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
unsigned i;
fill_buf_caps(q, &create->capabilities);
+ validate_memory_flags(q, create->memory, &create->flags);
create->index = q->num_buffers;
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -797,6 +793,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
if (requested_sizes[i] == 0)
return -EINVAL;
return ret ? ret : vb2_core_create_bufs(q, create->memory,
+ create->flags,
&create->count,
requested_planes,
requested_sizes);
@@ -993,13 +990,16 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ u32 flags = p->flags;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &flags);
+ p->flags = flags;
if (res)
return res;
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
+ res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
/* If count == 0, then the owner has released all buffers and he
is no longer owner of the queue. Otherwise we have a new owner. */
if (res == 0)
@@ -1017,6 +1017,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
p->index = vdev->queue->num_buffers;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &p->flags);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
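
On top of the queue-level coherency flag, the per-buffer cache hint buffer flags remain available and are translated by set_buffer_cache_hints() above into the new skip_cache_sync_on_* bits. A hedged userspace sketch follows (capture queue, MMAP memory and buffer index 0 are assumptions for illustration); the hint is honoured only if the queue advertises MMAP cache hint support.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hedged sketch: queue one MMAP buffer and ask to skip the ->finish() sync. */
static int queue_with_cache_hint(int fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	/* Maps to skip_cache_sync_on_finish in the vb2 core. */
	buf.flags = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;

	return ioctl(fd, VIDIOC_QBUF, &buf);
}
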
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 83f95258ec8c..ef36abd912dc 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -34,13 +34,12 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
- buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -52,7 +51,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
return ERR_PTR(-ENOMEM);
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -71,9 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv)
}
}
-static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_vmalloc_buf *buf;
struct frame_vector *vec;
@@ -84,7 +82,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
vec = vb2_create_framevec(vaddr, size);
@@ -147,7 +145,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_vmalloc_vaddr(void *buf_priv)
+static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
@@ -339,7 +337,9 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.release = vb2_vmalloc_dmabuf_ops_release,
};
-static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -403,8 +403,10 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
+ struct device *dev,
+ struct dma_buf *dbuf,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
@@ -416,7 +418,7 @@ static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
return buf;
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 6974f1731529..959d110407a4 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
ctx->buf_siz = req->size;
ctx->buf_cnt = req->count;
- ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count);
+ ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
if (ret) {
ctx->state = DVB_VB2_STATE_NONE;
dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index f88b5355493e..1c8207ab8988 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -3,15 +3,6 @@
* cxd2099.c: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2013 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/cxd2099.h b/drivers/media/dvb-frontends/cxd2099.h
index 0c101bdef01d..5d4060007c46 100644
--- a/drivers/media/dvb-frontends/cxd2099.h
+++ b/drivers/media/dvb-frontends/cxd2099.h
@@ -3,15 +3,6 @@
* cxd2099.h: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2011 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CXD2099_H_
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index 7baf0162424f..09c42bcef971 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -13,7 +13,7 @@
#include <media/dvb_frontend.h>
#include <media/dvb_math.h>
#include "cxd2820r.h"
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h> /* For gpio_chip */
#include <linux/math64.h>
#include <linux/regmap.h>
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index a7faf0cf8788..b74b9afed9a2 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -444,11 +444,11 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
unsigned layer)
{
int rc;
- int interleaving[] = {
+ static const int interleaving[] = {
0, 1, 2, 4, 8
};
- static unsigned char reg[] = {
+ static const unsigned char reg[] = {
[0] = 0x88, /* Layer A */
[1] = 0x8c, /* Layer B */
[2] = 0x90, /* Layer C */
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index e4528784f847..fff212c0bf3b 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -204,11 +204,18 @@ struct mn88443x_priv {
struct regmap *regmap_t;
};
-static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
+static int mn88443x_cmn_power_on(struct mn88443x_priv *chip)
{
+ struct device *dev = &chip->client_s->dev;
struct regmap *r_t = chip->regmap_t;
+ int ret;
- clk_prepare_enable(chip->mclk);
+ ret = clk_prepare_enable(chip->mclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare and enable mclk: %d\n",
+ ret);
+ return ret;
+ }
gpiod_set_value_cansleep(chip->reset_gpio, 1);
usleep_range(100, 1000);
@@ -222,6 +229,8 @@ static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
} else {
regmap_write(r_t, HIZSET3, 0x8f);
}
+
+ return 0;
}
static void mn88443x_cmn_power_off(struct mn88443x_priv *chip)
@@ -738,7 +747,10 @@ static int mn88443x_probe(struct i2c_client *client,
chip->fe.demodulator_priv = chip;
i2c_set_clientdata(client, chip);
- mn88443x_cmn_power_on(chip);
+ ret = mn88443x_cmn_power_on(chip);
+ if (ret)
+ goto err_i2c_t;
+
mn88443x_s_sleep(chip);
mn88443x_t_sleep(chip);
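
The mn88443x change above follows the usual pattern of checking the return value of clk_prepare_enable() rather than ignoring it. A minimal, hedged sketch of that pattern (the device and clock pointers are placeholders, not the driver's actual fields):

#include <linux/clk.h>
#include <linux/device.h>

/* Hedged sketch of the "check clk_prepare_enable()" pattern used above. */
static int demo_enable_clock(struct device *dev, struct clk *mclk)
{
	int ret;

	ret = clk_prepare_enable(mclk);
	if (ret) {
		dev_err(dev, "failed to enable clock: %d\n", ret);
		return ret;
	}

	/* ... hardware setup that relies on the running clock ... */

	return 0;
}
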
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 0b00a23436ed..934d1c0b214a 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/mxl5xx.h b/drivers/media/dvb-frontends/mxl5xx.h
index 706a2f5d8f97..139e16b2ecfc 100644
--- a/drivers/media/dvb-frontends/mxl5xx.h
+++ b/drivers/media/dvb-frontends/mxl5xx.h
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL5XX_H_
diff --git a/drivers/media/dvb-frontends/mxl5xx_defs.h b/drivers/media/dvb-frontends/mxl5xx_defs.h
index 1442af8dc176..097271f73740 100644
--- a/drivers/media/dvb-frontends/mxl5xx_defs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_defs.h
@@ -7,10 +7,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
*/
enum MXL_BOOL_E {
diff --git a/drivers/media/dvb-frontends/mxl5xx_regs.h b/drivers/media/dvb-frontends/mxl5xx_regs.h
index 86d5317eba7a..b38a13847033 100644
--- a/drivers/media/dvb-frontends/mxl5xx_regs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_regs.h
@@ -2,16 +2,6 @@
/*
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
* This program may alternatively be licensed under a proprietary license from
* MaxLinear, Inc.
*
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index a246db683cdf..dd7954e8f553 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mutex.h>
diff --git a/drivers/media/dvb-frontends/mxl692.h b/drivers/media/dvb-frontends/mxl692.h
index 45bc48f1f12f..77764a047c07 100644
--- a/drivers/media/dvb-frontends/mxl692.h
+++ b/drivers/media/dvb-frontends/mxl692.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL692_H_
diff --git a/drivers/media/dvb-frontends/mxl692_defs.h b/drivers/media/dvb-frontends/mxl692_defs.h
index 776ac407b4e7..c603f3d6f27f 100644
--- a/drivers/media/dvb-frontends/mxl692_defs.h
+++ b/drivers/media/dvb-frontends/mxl692_defs.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*****************************************************************************
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 1a2f0d2adadf..6a4f2997d6f5 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -376,8 +376,11 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(dev->urb_list[j]);
+ dev->urb_list[j] = NULL;
+ }
+ dev->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(dev->urb_list[i],
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index 68d7c7b41071..e517ff757744 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv0910.h b/drivers/media/dvb-frontends/stv0910.h
index 24ecc6902235..0b6f02ad7910 100644
--- a/drivers/media/dvb-frontends/stv0910.h
+++ b/drivers/media/dvb-frontends/stv0910.h
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV0910_H_
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
index d5035dac4574..2d0adb6fcb08 100644
--- a/drivers/media/dvb-frontends/stv6111.c
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv6111.h b/drivers/media/dvb-frontends/stv6111.h
index 49e821ac9954..f172c3e3d886 100644
--- a/drivers/media/dvb-frontends/stv6111.h
+++ b/drivers/media/dvb-frontends/stv6111.h
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV6111_H_
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 2bf9467b917d..71991f8638e6 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1165,7 +1165,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
read_pos += program_info_length;
write_pos += program_info_length;
}
- while (read_pos < length) {
+ while (read_pos + 4 < length) {
+ if (write_pos + 4 >= sizeof(c->operand) - 4) {
+ ret = -EINVAL;
+ goto out;
+ }
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
@@ -1177,13 +1181,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[write_pos++] = es_info_length >> 8;
c->operand[write_pos++] = es_info_length & 0xff;
if (es_info_length > 0) {
+ if (read_pos >= length) {
+ ret = -EINVAL;
+ goto out;
+ }
pmt_cmd_id = msg[read_pos++];
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
pmt_cmd_id);
- if (es_info_length > sizeof(c->operand) - 4 -
- write_pos) {
+ if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
+ es_info_length > length - read_pos) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index 9363d005e2b6..e0d57e09dab0 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -134,6 +134,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
} else {
data_length = msg->msg[3];
}
+ if (data_length > sizeof(msg->msg) - data_pos)
+ return -EINVAL;
return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 08feb3e8c1bf..d6a5d4ca439a 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -450,6 +450,7 @@ config VIDEO_TW9906
config VIDEO_TW9910
tristate "Techwell TW9910 video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
@@ -597,6 +598,7 @@ config VIDEO_AK881X
config VIDEO_THS8200
tristate "Texas Instruments THS8200 video encoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the Texas Instruments THS8200 video encoder.
@@ -610,6 +612,7 @@ menu "Video improvement chips"
config VIDEO_UPD64031A
tristate "NEC Electronics uPD64031A Ghost Reduction"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the NEC Electronics uPD64031A Ghost Reduction
video chip. It is most often found in NTSC TV cards made for
@@ -742,6 +745,19 @@ config VIDEO_HI556
To compile this driver as a module, choose M here: the
module will be called hi556.
+config VIDEO_HI846
+ tristate "Hynix Hi-846 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-846 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi846.
+
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -1186,6 +1202,16 @@ config VIDEO_OV13858
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
+config VIDEO_OV13B10
+ tristate "OmniVision OV13B10 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV13B10 camera.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -1229,6 +1255,7 @@ config VIDEO_MT9P031
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 83268f20aa3a..4d4fe08d7a6a 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV9734) += ov9734.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_OV13B10) += ov13b10.o
obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
@@ -117,6 +118,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
+obj-$(CONFIG_VIDEO_HI846) += hi846.o
obj-$(CONFIG_VIDEO_IMX208) += imx208.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX219) += imx219.o
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 122e1fdccd96..44768b59a6ff 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -41,7 +41,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
-MODULE_DESCRIPTION("Analog Devices ADV7604 video decoder driver");
+MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
@@ -77,7 +77,7 @@ MODULE_LICENSE("GPL");
enum adv76xx_type {
ADV7604,
- ADV7611,
+ ADV7611, // including ADV7610
ADV7612,
};
@@ -3176,6 +3176,7 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
static const struct i2c_device_id adv76xx_i2c_id[] = {
{ "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
+ { "adv7610", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7612", (kernel_ulong_t)&adv76xx_chip_info[ADV7612] },
{ }
@@ -3183,6 +3184,7 @@ static const struct i2c_device_id adv76xx_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
static const struct of_device_id adv76xx_of_id[] __maybe_unused = {
+ { .compatible = "adi,adv7610", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7612", .data = &adv76xx_chip_info[ADV7612] },
{ }
@@ -3500,8 +3502,8 @@ static int adv76xx_probe(struct i2c_client *client,
return -ENODEV;
}
if (val != 0x68) {
- v4l2_err(sd, "not an adv7604 on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an ADV7604 on address 0x%x\n",
+ client->addr << 1);
return -ENODEV;
}
break;
@@ -3525,8 +3527,9 @@ static int adv76xx_probe(struct i2c_client *client,
val |= val2;
if ((state->info->type == ADV7611 && val != 0x2051) ||
(state->info->type == ADV7612 && val != 0x2041)) {
- v4l2_err(sd, "not an adv761x on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an %s on address 0x%x\n",
+ state->info->type == ADV7611 ? "ADV7610/11" : "ADV7612",
+ client->addr << 1);
return -ENODEV;
}
break;
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index c8b4292512dc..3863dfeb8293 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
@@ -100,7 +101,15 @@ static const struct v4l2_subdev_internal_ops dw9714_int_ops = {
.close = dw9714_close,
};
-static const struct v4l2_subdev_ops dw9714_ops = { };
+static const struct v4l2_subdev_core_ops dw9714_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops dw9714_ops = {
+ .core = &dw9714_core_ops,
+};
static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev)
{
@@ -137,7 +146,8 @@ static int dw9714_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
- dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
dw9714_dev->sd.internal_ops = &dw9714_int_ops;
rval = dw9714_init_controls(dw9714_dev);
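
With V4L2_SUBDEV_FL_HAS_EVENTS and the new core ops, userspace can subscribe to control change events on the dw9714 subdev node. A hedged sketch (the subdev fd and the control ID are assumptions for illustration; V4L2_CID_FOCUS_ABSOLUTE is just one plausible choice for a VCM driver):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hedged sketch: wait for a control change event on a subdev node. */
static int wait_for_ctrl_event(int subdev_fd, unsigned int ctrl_id)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CTRL;
	sub.id = ctrl_id;	/* e.g. V4L2_CID_FOCUS_ABSOLUTE */

	if (ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* On a blocking fd this waits for an event; poll(POLLPRI) also works. */
	return ioctl(subdev_fd, VIDIOC_DQEVENT, &ev);
}
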
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
new file mode 100644
index 000000000000..822ce3021fde
--- /dev/null
+++ b/drivers/media/i2c/hi846.c
@@ -0,0 +1,2190 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Purism SPC
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI846_MEDIA_BUS_FORMAT MEDIA_BUS_FMT_SGBRG10_1X10
+#define HI846_RGB_DEPTH 10
+
+/* Frame length lines / vertical timings */
+#define HI846_REG_FLL 0x0006
+#define HI846_FLL_MAX 0xffff
+
+/* Horizontal timing */
+#define HI846_REG_LLP 0x0008
+#define HI846_LINE_LENGTH 3800
+
+#define HI846_REG_BINNING_MODE 0x000c
+
+#define HI846_REG_IMAGE_ORIENTATION 0x000e
+
+#define HI846_REG_UNKNOWN_0022 0x0022
+
+#define HI846_REG_Y_ADDR_START_VACT_H 0x0026
+#define HI846_REG_Y_ADDR_START_VACT_L 0x0027
+#define HI846_REG_UNKNOWN_0028 0x0028
+
+#define HI846_REG_Y_ADDR_END_VACT_H 0x002c
+#define HI846_REG_Y_ADDR_END_VACT_L 0x002d
+
+#define HI846_REG_Y_ODD_INC_FOBP 0x002e
+#define HI846_REG_Y_EVEN_INC_FOBP 0x002f
+
+#define HI846_REG_Y_ODD_INC_VACT 0x0032
+#define HI846_REG_Y_EVEN_INC_VACT 0x0033
+
+#define HI846_REG_GROUPED_PARA_HOLD 0x0046
+
+#define HI846_REG_TG_ENABLE 0x004c
+
+#define HI846_REG_UNKNOWN_005C 0x005c
+
+#define HI846_REG_UNKNOWN_006A 0x006a
+
+/*
+ * Long exposure time. Actually, exposure is a 20 bit value that
+ * includes the lower 4 bits of 0x0073 too. Only 16 bits are used
+ * right now
+ */
+#define HI846_REG_EXPOSURE 0x0074
+#define HI846_EXPOSURE_MIN 6
+#define HI846_EXPOSURE_MAX_MARGIN 2
+#define HI846_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI846_REG_ANALOG_GAIN 0x0077
+#define HI846_ANAL_GAIN_MIN 0
+#define HI846_ANAL_GAIN_MAX 240
+#define HI846_ANAL_GAIN_STEP 8
+
+/* Digital gain controls from sensor */
+#define HI846_REG_MWB_GR_GAIN_H 0x0078
+#define HI846_REG_MWB_GR_GAIN_L 0x0079
+#define HI846_REG_MWB_GB_GAIN_H 0x007a
+#define HI846_REG_MWB_GB_GAIN_L 0x007b
+#define HI846_REG_MWB_R_GAIN_H 0x007c
+#define HI846_REG_MWB_R_GAIN_L 0x007d
+#define HI846_REG_MWB_B_GAIN_H 0x007e
+#define HI846_REG_MWB_B_GAIN_L 0x007f
+#define HI846_DGTL_GAIN_MIN 512
+#define HI846_DGTL_GAIN_MAX 8191
+#define HI846_DGTL_GAIN_STEP 1
+#define HI846_DGTL_GAIN_DEFAULT 512
+
+#define HI846_REG_X_ADDR_START_HACT_H 0x0120
+#define HI846_REG_X_ADDR_END_HACT_H 0x0122
+
+#define HI846_REG_UNKNOWN_012A 0x012a
+
+#define HI846_REG_UNKNOWN_0200 0x0200
+
+#define HI846_REG_UNKNOWN_021C 0x021c
+#define HI846_REG_UNKNOWN_021E 0x021e
+
+#define HI846_REG_UNKNOWN_0402 0x0402
+#define HI846_REG_UNKNOWN_0404 0x0404
+#define HI846_REG_UNKNOWN_0408 0x0408
+#define HI846_REG_UNKNOWN_0410 0x0410
+#define HI846_REG_UNKNOWN_0412 0x0412
+#define HI846_REG_UNKNOWN_0414 0x0414
+
+#define HI846_REG_UNKNOWN_0418 0x0418
+
+#define HI846_REG_UNKNOWN_051E 0x051e
+
+/* Formatter */
+#define HI846_REG_X_START_H 0x0804
+#define HI846_REG_X_START_L 0x0805
+
+/* MIPI */
+#define HI846_REG_UNKNOWN_0900 0x0900
+#define HI846_REG_MIPI_TX_OP_EN 0x0901
+#define HI846_REG_MIPI_TX_OP_MODE 0x0902
+#define HI846_RAW8 BIT(5)
+
+#define HI846_REG_UNKNOWN_090C 0x090c
+#define HI846_REG_UNKNOWN_090E 0x090e
+
+#define HI846_REG_UNKNOWN_0914 0x0914
+#define HI846_REG_TLPX 0x0915
+#define HI846_REG_TCLK_PREPARE 0x0916
+#define HI846_REG_TCLK_ZERO 0x0917
+#define HI846_REG_UNKNOWN_0918 0x0918
+#define HI846_REG_THS_PREPARE 0x0919
+#define HI846_REG_THS_ZERO 0x091a
+#define HI846_REG_THS_TRAIL 0x091b
+#define HI846_REG_TCLK_POST 0x091c
+#define HI846_REG_TCLK_TRAIL_MIN 0x091d
+#define HI846_REG_UNKNOWN_091E 0x091e
+
+#define HI846_REG_UNKNOWN_0954 0x0954
+#define HI846_REG_UNKNOWN_0956 0x0956
+#define HI846_REG_UNKNOWN_0958 0x0958
+#define HI846_REG_UNKNOWN_095A 0x095a
+
+/* ISP Common */
+#define HI846_REG_MODE_SELECT 0x0a00
+#define HI846_MODE_STANDBY 0x00
+#define HI846_MODE_STREAMING 0x01
+#define HI846_REG_FAST_STANDBY_MODE 0x0a02
+#define HI846_REG_ISP_EN_H 0x0a04
+
+/* Test Pattern Control */
+#define HI846_REG_ISP 0x0a05
+#define HI846_REG_ISP_TPG_EN 0x01
+#define HI846_REG_TEST_PATTERN 0x020a /* 1-9 */
+
+#define HI846_REG_UNKNOWN_0A0C 0x0a0c
+
+/* Windowing */
+#define HI846_REG_X_OUTPUT_SIZE_H 0x0a12
+#define HI846_REG_X_OUTPUT_SIZE_L 0x0a13
+#define HI846_REG_Y_OUTPUT_SIZE_H 0x0a14
+#define HI846_REG_Y_OUTPUT_SIZE_L 0x0a15
+
+/* ISP Common */
+#define HI846_REG_PEDESTAL_EN 0x0a1a
+
+#define HI846_REG_UNKNOWN_0A1E 0x0a1e
+
+/* Horizontal Binning Mode */
+#define HI846_REG_HBIN_MODE 0x0a22
+
+#define HI846_REG_UNKNOWN_0A24 0x0a24
+#define HI846_REG_UNKNOWN_0B02 0x0b02
+#define HI846_REG_UNKNOWN_0B10 0x0b10
+#define HI846_REG_UNKNOWN_0B12 0x0b12
+#define HI846_REG_UNKNOWN_0B14 0x0b14
+
+/* BLC (Black Level Calibration) */
+#define HI846_REG_BLC_CTL0 0x0c00
+
+#define HI846_REG_UNKNOWN_0C06 0x0c06
+#define HI846_REG_UNKNOWN_0C10 0x0c10
+#define HI846_REG_UNKNOWN_0C12 0x0c12
+#define HI846_REG_UNKNOWN_0C14 0x0c14
+#define HI846_REG_UNKNOWN_0C16 0x0c16
+
+#define HI846_REG_UNKNOWN_0E04 0x0e04
+
+#define HI846_REG_CHIP_ID_L 0x0f16
+#define HI846_REG_CHIP_ID_H 0x0f17
+#define HI846_CHIP_ID_L 0x46
+#define HI846_CHIP_ID_H 0x08
+
+#define HI846_REG_UNKNOWN_0F04 0x0f04
+#define HI846_REG_UNKNOWN_0F08 0x0f08
+
+/* PLL */
+#define HI846_REG_PLL_CFG_MIPI2_H 0x0f2a
+#define HI846_REG_PLL_CFG_MIPI2_L 0x0f2b
+
+#define HI846_REG_UNKNOWN_0F30 0x0f30
+#define HI846_REG_PLL_CFG_RAMP1_H 0x0f32
+#define HI846_REG_UNKNOWN_0F36 0x0f36
+#define HI846_REG_PLL_CFG_MIPI1_H 0x0f38
+
+#define HI846_REG_UNKNOWN_2008 0x2008
+#define HI846_REG_UNKNOWN_326E 0x326e
+
+struct hi846_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi846_reg_list {
+ u32 num_of_regs;
+ const struct hi846_reg *regs;
+};
+
+struct hi846_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Link frequency needed for this resolution */
+ u8 link_freq_index;
+
+ u16 fps;
+
+ /* Vertical timining size */
+ u16 frame_len;
+
+ const struct hi846_reg_list reg_list_config;
+ const struct hi846_reg_list reg_list_2lane;
+ const struct hi846_reg_list reg_list_4lane;
+
+ /* Position inside of the 3264x2448 pixel array */
+ struct v4l2_rect crop;
+};
+
+static const struct hi846_reg hi846_init_2lane[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ /* regs below are unknown */
+ {0x2000, 0x100a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0007},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3248, 0xfca8},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+ /* regs above are unknown */
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_UNKNOWN_0E04, 0x0012},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x1111},
+ {HI846_REG_Y_ODD_INC_VACT, 0x1111},
+ {HI846_REG_UNKNOWN_0022, 0x0008},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_UNKNOWN_0028, 0x0017},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x2101},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_IMAGE_ORIENTATION, 0x0100},
+ {HI846_REG_BINNING_MODE, 0x0022},
+ {HI846_REG_HBIN_MODE, 0x0000},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0cc0},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x0990},
+ {HI846_REG_EXPOSURE, 0x09d8},
+ {HI846_REG_ANALOG_GAIN, 0x0000},
+ {HI846_REG_GROUPED_PARA_HOLD, 0x0000},
+ {HI846_REG_UNKNOWN_051E, 0x0000},
+ {HI846_REG_UNKNOWN_0200, 0x0400},
+ {HI846_REG_PEDESTAL_EN, 0x0c00},
+ {HI846_REG_UNKNOWN_0A0C, 0x0010},
+ {HI846_REG_UNKNOWN_0A1E, 0x0ccf},
+ {HI846_REG_UNKNOWN_0402, 0x0110},
+ {HI846_REG_UNKNOWN_0404, 0x00f4},
+ {HI846_REG_UNKNOWN_0408, 0x0000},
+ {HI846_REG_UNKNOWN_0410, 0x008d},
+ {HI846_REG_UNKNOWN_0412, 0x011a},
+ {HI846_REG_UNKNOWN_0414, 0x864c},
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+ {HI846_REG_BLC_CTL0, 0x9150},
+ {HI846_REG_UNKNOWN_0C06, 0x0021},
+ {HI846_REG_UNKNOWN_0C10, 0x0040},
+ {HI846_REG_UNKNOWN_0C12, 0x0040},
+ {HI846_REG_UNKNOWN_0C14, 0x0040},
+ {HI846_REG_UNKNOWN_0C16, 0x0040},
+ {HI846_REG_FAST_STANDBY_MODE, 0x0100},
+ {HI846_REG_ISP_EN_H, 0x014a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_012A, 0x03b4},
+ {HI846_REG_X_ADDR_START_HACT_H, 0x0046},
+ {HI846_REG_X_ADDR_END_HACT_H, 0x0376},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6821},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+ {HI846_REG_UNKNOWN_0900, 0x0320},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc31a},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0306},
+ {HI846_REG_THS_ZERO, 0x0b09},
+ {HI846_REG_TCLK_POST, 0x0c07},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x042a},
+ {HI846_REG_UNKNOWN_090E, 0x006b},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca00},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_UNKNOWN_0F08, 0x2f04},
+ {HI846_REG_UNKNOWN_0F30, 0x001f},
+ {HI846_REG_UNKNOWN_0F36, 0x001f},
+ {HI846_REG_UNKNOWN_0F04, 0x3a00},
+ {HI846_REG_PLL_CFG_RAMP1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x0024},
+ {HI846_REG_UNKNOWN_006A, 0x0100},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg hi846_init_4lane[] = {
+ {0x2000, 0x987a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0047},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x20b4, 0xf0b2},
+ {0x20b6, 0xffbf},
+ {0x20b8, 0x2004},
+ {0x20ba, 0x403f},
+ {0x20bc, 0x00c3},
+ {0x20be, 0x4fe2},
+ {0x20c0, 0x8318},
+ {0x20c2, 0x43cf},
+ {0x20c4, 0x0000},
+ {0x20c6, 0x9382},
+ {0x20c8, 0xc314},
+ {0x20ca, 0x2003},
+ {0x20cc, 0x12b0},
+ {0x20ce, 0xcab0},
+ {0x20d0, 0x4130},
+ {0x20d2, 0x12b0},
+ {0x20d4, 0xc90a},
+ {0x20d6, 0x4130},
+ {0x20d8, 0x42d2},
+ {0x20da, 0x8318},
+ {0x20dc, 0x00c3},
+ {0x20de, 0x9382},
+ {0x20e0, 0xc314},
+ {0x20e2, 0x2009},
+ {0x20e4, 0x120b},
+ {0x20e6, 0x120a},
+ {0x20e8, 0x1209},
+ {0x20ea, 0x1208},
+ {0x20ec, 0x1207},
+ {0x20ee, 0x1206},
+ {0x20f0, 0x4030},
+ {0x20f2, 0xc15e},
+ {0x20f4, 0x4130},
+ {0x20f6, 0x1292},
+ {0x20f8, 0xc008},
+ {0x20fa, 0x4130},
+ {0x20fc, 0x42d2},
+ {0x20fe, 0x82a1},
+ {0x2100, 0x00c2},
+ {0x2102, 0x1292},
+ {0x2104, 0xc040},
+ {0x2106, 0x4130},
+ {0x2108, 0x1292},
+ {0x210a, 0xc006},
+ {0x210c, 0x42a2},
+ {0x210e, 0x7324},
+ {0x2110, 0x9382},
+ {0x2112, 0xc314},
+ {0x2114, 0x2011},
+ {0x2116, 0x425f},
+ {0x2118, 0x82a1},
+ {0x211a, 0xf25f},
+ {0x211c, 0x00c1},
+ {0x211e, 0xf35f},
+ {0x2120, 0x2406},
+ {0x2122, 0x425f},
+ {0x2124, 0x00c0},
+ {0x2126, 0xf37f},
+ {0x2128, 0x522f},
+ {0x212a, 0x4f82},
+ {0x212c, 0x7324},
+ {0x212e, 0x425f},
+ {0x2130, 0x82d4},
+ {0x2132, 0xf35f},
+ {0x2134, 0x4fc2},
+ {0x2136, 0x01b3},
+ {0x2138, 0x93c2},
+ {0x213a, 0x829f},
+ {0x213c, 0x2421},
+ {0x213e, 0x403e},
+ {0x2140, 0xfffe},
+ {0x2142, 0x40b2},
+ {0x2144, 0xec78},
+ {0x2146, 0x831c},
+ {0x2148, 0x40b2},
+ {0x214a, 0xec78},
+ {0x214c, 0x831e},
+ {0x214e, 0x40b2},
+ {0x2150, 0xec78},
+ {0x2152, 0x8320},
+ {0x2154, 0xb3d2},
+ {0x2156, 0x008c},
+ {0x2158, 0x2405},
+ {0x215a, 0x4e0f},
+ {0x215c, 0x503f},
+ {0x215e, 0xffd8},
+ {0x2160, 0x4f82},
+ {0x2162, 0x831c},
+ {0x2164, 0x90f2},
+ {0x2166, 0x0003},
+ {0x2168, 0x008c},
+ {0x216a, 0x2401},
+ {0x216c, 0x4130},
+ {0x216e, 0x421f},
+ {0x2170, 0x831c},
+ {0x2172, 0x5e0f},
+ {0x2174, 0x4f82},
+ {0x2176, 0x831e},
+ {0x2178, 0x5e0f},
+ {0x217a, 0x4f82},
+ {0x217c, 0x8320},
+ {0x217e, 0x3ff6},
+ {0x2180, 0x432e},
+ {0x2182, 0x3fdf},
+ {0x2184, 0x421f},
+ {0x2186, 0x7100},
+ {0x2188, 0x4f0e},
+ {0x218a, 0x503e},
+ {0x218c, 0xffd8},
+ {0x218e, 0x4e82},
+ {0x2190, 0x7a04},
+ {0x2192, 0x421e},
+ {0x2194, 0x831c},
+ {0x2196, 0x5f0e},
+ {0x2198, 0x4e82},
+ {0x219a, 0x7a06},
+ {0x219c, 0x0b00},
+ {0x219e, 0x7304},
+ {0x21a0, 0x0050},
+ {0x21a2, 0x40b2},
+ {0x21a4, 0xd081},
+ {0x21a6, 0x0b88},
+ {0x21a8, 0x421e},
+ {0x21aa, 0x831e},
+ {0x21ac, 0x5f0e},
+ {0x21ae, 0x4e82},
+ {0x21b0, 0x7a0e},
+ {0x21b2, 0x521f},
+ {0x21b4, 0x8320},
+ {0x21b6, 0x4f82},
+ {0x21b8, 0x7a10},
+ {0x21ba, 0x0b00},
+ {0x21bc, 0x7304},
+ {0x21be, 0x007a},
+ {0x21c0, 0x40b2},
+ {0x21c2, 0x0081},
+ {0x21c4, 0x0b88},
+ {0x21c6, 0x4392},
+ {0x21c8, 0x7a0a},
+ {0x21ca, 0x0800},
+ {0x21cc, 0x7a0c},
+ {0x21ce, 0x0b00},
+ {0x21d0, 0x7304},
+ {0x21d2, 0x022b},
+ {0x21d4, 0x40b2},
+ {0x21d6, 0xd081},
+ {0x21d8, 0x0b88},
+ {0x21da, 0x0b00},
+ {0x21dc, 0x7304},
+ {0x21de, 0x0255},
+ {0x21e0, 0x40b2},
+ {0x21e2, 0x0081},
+ {0x21e4, 0x0b88},
+ {0x21e6, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3238, 0xfcfc},
+ {0x323a, 0xfd84},
+ {0x323c, 0xfd08},
+ {0x3246, 0xfcd8},
+ {0x3248, 0xfca8},
+ {0x324e, 0xfcb4},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+
+ {0x0a00, 0x0000},
+ {0x0e04, 0x0012},
+ {0x002e, 0x1111},
+ {0x0032, 0x1111},
+ {0x0022, 0x0008},
+ {0x0026, 0x0040},
+ {0x0028, 0x0017},
+ {0x002c, 0x09cf},
+ {0x005c, 0x2101},
+ {0x0006, 0x09de},
+ {0x0008, 0x0ed8},
+ {0x000e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0a22, 0x0000},
+ {0x0a24, 0x0000},
+ {0x0804, 0x0000},
+ {0x0a12, 0x0cc0},
+ {0x0a14, 0x0990},
+ {0x0074, 0x09d8},
+ {0x0076, 0x0000},
+ {0x051e, 0x0000},
+ {0x0200, 0x0400},
+ {0x0a1a, 0x0c00},
+ {0x0a0c, 0x0010},
+ {0x0a1e, 0x0ccf},
+ {0x0402, 0x0110},
+ {0x0404, 0x00f4},
+ {0x0408, 0x0000},
+ {0x0410, 0x008d},
+ {0x0412, 0x011a},
+ {0x0414, 0x864c},
+ /* for OTP */
+ {0x021c, 0x0003},
+ {0x021e, 0x0235},
+ /* for OTP */
+ {0x0c00, 0x9950},
+ {0x0c06, 0x0021},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a02, 0x0100},
+ {0x0a04, 0x015a},
+ {0x0418, 0x0000},
+ {0x0128, 0x0028},
+ {0x012a, 0xffff},
+ {0x0120, 0x0046},
+ {0x0122, 0x0376},
+ {0x012c, 0x0020},
+ {0x012e, 0xffff},
+ {0x0124, 0x0040},
+ {0x0126, 0x0378},
+ {0x0746, 0x0050},
+ {0x0748, 0x01d5},
+ {0x074a, 0x022b},
+ {0x074c, 0x03b0},
+ {0x0756, 0x043f},
+ {0x0758, 0x3f1d},
+ {0x0b02, 0xe04d},
+ {0x0b10, 0x6821},
+ {0x0b12, 0x0120},
+ {0x0b14, 0x0001},
+ {0x2008, 0x38fd},
+ {0x326e, 0x0000},
+ {0x0900, 0x0300},
+ {0x0902, 0xc319},
+ {0x0914, 0xc109},
+ {0x0916, 0x061a},
+ {0x0918, 0x0407},
+ {0x091a, 0x0a0b},
+ {0x091c, 0x0e08},
+ {0x091e, 0x0a00},
+ {0x090c, 0x0427},
+ {0x090e, 0x0059},
+ {0x0954, 0x0089},
+ {0x0956, 0x0000},
+ {0x0958, 0xca80},
+ {0x095a, 0x9240},
+ {0x0f08, 0x2f04},
+ {0x0f30, 0x001f},
+ {0x0f36, 0x001f},
+ {0x0f04, 0x3a00},
+ {0x0f32, 0x025a},
+ {0x0f38, 0x025a},
+ {0x0f2a, 0x4124},
+ {0x006a, 0x0100},
+ {0x004c, 0x0100},
+ {0x0044, 0x0001},
+};
+
+static const struct hi846_reg mode_640x480_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x7711},
+ {HI846_REG_Y_ODD_INC_VACT, 0x7711},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0148},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x08c7},
+ {HI846_REG_UNKNOWN_005C, 0x4404},
+ {HI846_REG_FLL, 0x0277},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0322},
+ {HI846_REG_HBIN_MODE, 0x0200},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0058},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0280},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x01e0},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0210},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x7021},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_640x480_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x009a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0238},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x07d7},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x034a},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x00b0},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0500},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x02d0},
+ {HI846_REG_EXPOSURE, 0x0344},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0410},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0145},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_4lane[] = {
+ /* 360Mbps */
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x008a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0660},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x04c8},
+ {HI846_REG_EXPOSURE, 0x09d8},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0069},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_4lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x001c},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const char * const hi846_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+ "Resolution Pattern",
+};
+
+#define FREQ_INDEX_640 0
+#define FREQ_INDEX_1280 1
+static const s64 hi846_link_freqs[] = {
+ [FREQ_INDEX_640] = 80000000,
+ [FREQ_INDEX_1280] = 200000000,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_2lane),
+ .regs = hi846_init_2lane,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_4lane),
+ .regs = hi846_init_4lane,
+};
+
+static const struct hi846_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .link_freq_index = FREQ_INDEX_640,
+ .fps = 120,
+ .frame_len = 631,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_config),
+ .regs = mode_640x480_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_mipi_2lane),
+ .regs = mode_640x480_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = 0,
+ },
+ .crop = {
+ .left = 0x58,
+ .top = 0x148,
+ .width = 640 * 4,
+ .height = 480 * 4,
+ },
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 90,
+ .frame_len = 842,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_config),
+ .regs = mode_1280x720_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_2lane),
+ .regs = mode_1280x720_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_4lane),
+ .regs = mode_1280x720_mipi_4lane,
+ },
+ .crop = {
+ .left = 0xb0,
+ .top = 0x238,
+ .width = 1280 * 2,
+ .height = 720 * 2,
+ },
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 30,
+ .frame_len = 2526,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_config),
+ .regs = mode_1632x1224_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_2lane),
+ .regs = mode_1632x1224_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_4lane),
+ .regs = mode_1632x1224_mipi_4lane,
+ },
+ .crop = {
+ .left = 0x0,
+ .top = 0x0,
+ .width = 1632 * 2,
+ .height = 1224 * 2,
+ },
+ }
+};
+
+struct hi846_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+static const char * const hi846_supply_names[] = {
+ "vddio", /* Digital I/O (1.8V or 2.8V) */
+ "vdda", /* Analog (2.8V) */
+ "vddd", /* Digital Core (1.2V) */
+};
+
+#define HI846_NUM_SUPPLIES ARRAY_SIZE(hi846_supply_names)
+
+struct hi846 {
+ struct gpio_desc *rst_gpio;
+ struct gpio_desc *shutdown_gpio;
+ struct regulator_bulk_data supplies[HI846_NUM_SUPPLIES];
+ struct clk *clock;
+ const struct hi846_datafmt *fmt;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 nr_lanes;
+
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ struct mutex mutex; /* protect cur_mode, streaming and chip access */
+ const struct hi846_mode *cur_mode;
+ bool streaming;
+};
+
+static inline struct hi846 *to_hi846(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hi846, sd);
+}
+
+static const struct hi846_datafmt hi846_colour_fmts[] = {
+ { HI846_MEDIA_BUS_FORMAT, V4L2_COLORSPACE_RAW },
+};
+
+static const struct hi846_datafmt *hi846_find_datafmt(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hi846_colour_fmts); i++)
+ if (hi846_colour_fmts[i].code == code)
+ return &hi846_colour_fmts[i];
+
+ return NULL;
+}
+
+static inline u8 hi846_get_link_freq_index(struct hi846 *hi846)
+{
+ return hi846->cur_mode->link_freq_index;
+}
+
+static u64 hi846_get_link_freq(struct hi846 *hi846)
+{
+ u8 index = hi846_get_link_freq_index(hi846);
+
+ return hi846_link_freqs[index];
+}
+
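+/*
+ * The pixel rate is derived from the MIPI link frequency: the link is
+ * double data rate, so the bit rate is link_freq * 2 * nr_lanes, and
+ * dividing by the bits per pixel (HI846_RGB_DEPTH) gives pixels per second.
+ */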
+static u64 hi846_calc_pixel_rate(struct hi846 *hi846)
+{
+ u64 link_freq = hi846_get_link_freq(hi846);
+ u64 pixel_rate = link_freq * 2 * hi846->nr_lanes;
+
+ do_div(pixel_rate, HI846_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static int hi846_read_reg(struct hi846 *hi846, u16 reg, u8 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[1] = {0};
+ int ret;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "i2c read error: %d\n", ret);
+ return -EIO;
+ }
+
+ *val = data_buf[0];
+
+ return 0;
+}
+
+static int hi846_write_reg(struct hi846 *hi846, u16 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[3] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg[] = {
+ { .addr = client->addr, .flags = 0,
+ .len = ARRAY_SIZE(buf), .buf = buf },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev, "i2c write error\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
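+/*
+ * 16-bit register write helper with cascading error handling: if *err is
+ * already set, the write is skipped, so callers can issue a sequence of
+ * writes and check for failure once at the end.
+ */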
+static void hi846_write_reg_16(struct hi846 *hi846, u16 reg, u16 val, int *err)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[4];
+ int ret;
+
+ if (*err < 0)
+ return;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "i2c_master_send != %zu: %d\n",
+ sizeof(buf), ret);
+ *err = -EIO;
+ }
+}
+
+static int hi846_write_reg_list(struct hi846 *hi846,
+ const struct hi846_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ hi846_write_reg_16(hi846, r_list->regs[i].address,
+ r_list->regs[i].val, &ret);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x: %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
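+/* Apply the same digital gain to all four Bayer channels (Gr, Gb, R, B). */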
+static int hi846_update_digital_gain(struct hi846 *hi846, u16 d_gain)
+{
+ int ret = 0;
+
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GR_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GB_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_R_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_B_GAIN_H, d_gain, &ret);
+
+ return ret;
+}
+
+static int hi846_test_pattern(struct hi846 *hi846, u32 pattern)
+{
+ int ret;
+ u8 val;
+
+ if (pattern) {
+ ret = hi846_read_reg(hi846, HI846_REG_ISP, &val);
+ if (ret)
+ return ret;
+
+ ret = hi846_write_reg(hi846, HI846_REG_ISP,
+ val | HI846_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi846_write_reg(hi846, HI846_REG_TEST_PATTERN, pattern);
+}
+
+static int hi846_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi846 *hi846 = container_of(ctrl->handler,
+ struct hi846, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ s64 exposure_max;
+ int ret = 0;
+ u32 shutter, frame_len;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi846->cur_mode->height + ctrl->val -
+ HI846_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi846->exposure,
+ hi846->exposure->minimum,
+ exposure_max, hi846->exposure->step,
+ exposure_max);
+ }
+
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi846_write_reg(hi846, HI846_REG_ANALOG_GAIN, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi846_update_digital_gain(hi846, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ shutter = ctrl->val;
+ frame_len = hi846->cur_mode->frame_len;
+
+ if (shutter > frame_len - 6) { /* margin */
+ frame_len = shutter + 6;
+ if (frame_len > 0xffff) /* max frame len */
+ frame_len = 0xffff;
+ }
+
+ if (shutter < 6)
+ shutter = 6;
+ if (shutter > (0xffff - 6))
+ shutter = 0xffff - 6;
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_len, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_EXPOSURE, shutter, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ hi846_write_reg_16(hi846, HI846_REG_FLL,
+ hi846->cur_mode->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi846_test_pattern(hi846, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi846_ctrl_ops = {
+ .s_ctrl = hi846_set_ctrl,
+};
+
+static int hi846_init_controls(struct hi846 *hi846)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct v4l2_fwnode_device_properties props;
+
+ ctrl_hdlr = &hi846->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi846->mutex;
+
+ hi846->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(hi846_link_freqs) - 1,
+ 0, hi846_link_freqs);
+ if (hi846->link_freq)
+ hi846->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi846->pixel_rate =
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ hi846_calc_pixel_rate(hi846), 1,
+ hi846_calc_pixel_rate(hi846));
+ hi846->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX -
+ hi846->cur_mode->height, 1,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ hi846->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi846->hblank)
+ hi846->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI846_ANAL_GAIN_MIN, HI846_ANAL_GAIN_MAX,
+ HI846_ANAL_GAIN_STEP, HI846_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI846_DGTL_GAIN_MIN, HI846_DGTL_GAIN_MAX,
+ HI846_DGTL_GAIN_STEP, HI846_DGTL_GAIN_DEFAULT);
+ exposure_max = hi846->cur_mode->frame_len - HI846_EXPOSURE_MAX_MARGIN;
+ hi846->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI846_EXPOSURE_MIN, exposure_max,
+ HI846_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi846_test_pattern_menu) - 1,
+ 0, 0, hi846_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ dev_err(&client->dev, "v4l ctrl handler error: %d\n",
+ ctrl_hdlr->error);
+ ret = ctrl_hdlr->error;
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ hi846->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
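+/*
+ * Program the frame length (FLL) and line length (LLP) for the requested
+ * frame rate. The target frame length is derived as
+ * link_freq / (fps * HI846_LINE_LENGTH) and is never allowed to drop below
+ * the mode's default frame length; any excess is added as dummy lines.
+ */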
+static int hi846_set_video_mode(struct hi846 *hi846, int fps)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u64 frame_length;
+ int ret = 0;
+ int dummy_lines;
+ u64 link_freq = hi846_get_link_freq(hi846);
+
+ dev_dbg(&client->dev, "%s: link freq: %llu\n", __func__,
+ hi846_get_link_freq(hi846));
+
+ do_div(link_freq, fps);
+ frame_length = link_freq;
+ do_div(frame_length, HI846_LINE_LENGTH);
+
+ dummy_lines = (frame_length > hi846->cur_mode->frame_len) ?
+ (frame_length - hi846->cur_mode->frame_len) : 0;
+
+ frame_length = hi846->cur_mode->frame_len + dummy_lines;
+
+ dev_dbg(&client->dev, "%s: frame length calculated: %llu\n", __func__,
+ frame_length);
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_length & 0xFFFF, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_LLP,
+ HI846_LINE_LENGTH & 0xFFFF, &ret);
+
+ return ret;
+}
+
+static int hi846_start_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret = 0;
+ u8 val;
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls: %d\n", ret);
+ return ret;
+ }
+
+ ret = hi846_write_reg_list(hi846, &hi846->cur_mode->reg_list_config);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode: %d\n", ret);
+ return ret;
+ }
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mipi mode: %d\n", ret);
+ return ret;
+ }
+
+ hi846_set_video_mode(hi846, hi846->cur_mode->fps);
+
+ ret = __v4l2_ctrl_handler_setup(hi846->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /*
+ * Reading 0x0034 is done purely for debugging: it is not documented
+ * in the datasheet and is only mentioned once:
+ * "If 0x0034[2] bit is disabled , Visible pixel width and height is 0."
+ * Even though that sounds like nothing would be visible, nothing more
+ * is known about this bit, so in that case only inform the user and do
+ * nothing else.
+ */
+ ret = hi846_read_reg(hi846, 0x0034, &val);
+ if (ret)
+ return ret;
+ if (!(val & BIT(2)))
+ dev_info(&client->dev, "visible pixel width and height is 0\n");
+
+ ret = hi846_write_reg(hi846, HI846_REG_MODE_SELECT,
+ HI846_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to start stream");
+ return ret;
+ }
+
+ hi846->streaming = 1;
+
+ dev_dbg(&client->dev, "%s: started streaming successfully\n", __func__);
+
+ return ret;
+}
+
+static void hi846_stop_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+
+ if (hi846_write_reg(hi846, HI846_REG_MODE_SELECT, HI846_MODE_STANDBY))
+ dev_err(&client->dev, "failed to stop stream");
+
+ hi846->streaming = 0;
+}
+
+static int hi846_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi846->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi846->mutex);
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto out;
+ }
+
+ ret = hi846_start_streaming(hi846);
+ }
+
+ if (!enable || ret) {
+ hi846_stop_streaming(hi846);
+ pm_runtime_put(&client->dev);
+ }
+
+out:
+ mutex_unlock(&hi846->mutex);
+
+ return ret;
+}
+
+static int hi846_power_on(struct hi846 *hi846)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(HI846_NUM_SUPPLIES, hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(hi846->clock);
+ if (ret < 0)
+ goto err_reg;
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 0);
+
+ /* 30us = 2400 cycles at 80MHz */
+ usleep_range(30, 60);
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 0);
+ usleep_range(30, 60);
+
+ return 0;
+
+err_reg:
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+
+ return ret;
+}
+
+static void hi846_power_off(struct hi846 *hi846)
+{
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 1);
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 1);
+
+ clk_disable_unprepare(hi846->clock);
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+}
+
+static int __maybe_unused hi846_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ if (hi846->streaming)
+ hi846_stop_streaming(hi846);
+
+ hi846_power_off(hi846);
+
+ return 0;
+}
+
+static int __maybe_unused hi846_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+ int ret;
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ return ret;
+
+ if (hi846->streaming) {
+ ret = hi846_start_streaming(hi846);
+ if (ret) {
+ dev_err(dev, "%s: start streaming failed: %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ hi846_power_off(hi846);
+ return ret;
+}
+
+static int hi846_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct hi846_datafmt *fmt = hi846_find_datafmt(mf->code);
+ u32 tgt_fps;
+ s32 vblank_def, h_blank;
+
+ if (!fmt) {
+ mf->code = hi846_colour_fmts[0].code;
+ mf->colorspace = hi846_colour_fmts[0].colorspace;
+ fmt = &hi846_colour_fmts[0];
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = *mf;
+ return 0;
+ }
+
+ if (hi846->nr_lanes == 2) {
+ if (!hi846->cur_mode->reg_list_2lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 2 lanes\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!hi846->cur_mode->reg_list_4lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 4 lanes\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&hi846->mutex);
+
+ if (hi846->streaming) {
+ mutex_unlock(&hi846->mutex);
+ return -EBUSY;
+ }
+
+ hi846->fmt = fmt;
+
+ hi846->cur_mode =
+ v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height, mf->width, mf->height);
+ dev_dbg(&client->dev, "%s: found mode: %dx%d\n", __func__,
+ hi846->cur_mode->width, hi846->cur_mode->height);
+
+ tgt_fps = hi846->cur_mode->fps;
+ dev_dbg(&client->dev, "%s: target fps: %d\n", __func__, tgt_fps);
+
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->field = V4L2_FIELD_NONE;
+
+ __v4l2_ctrl_s_ctrl(hi846->link_freq, hi846_get_link_freq_index(hi846));
+ __v4l2_ctrl_s_ctrl_int64(hi846->pixel_rate,
+ hi846_calc_pixel_rate(hi846));
+
+ /* Update limits and set FPS to default */
+ vblank_def = hi846->cur_mode->frame_len - hi846->cur_mode->height;
+ __v4l2_ctrl_modify_range(hi846->vblank,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX - hi846->cur_mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi846->vblank, vblank_def);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi846->hblank, h_blank, h_blank, 1,
+ h_blank);
+
+ dev_dbg(&client->dev, "Set fmt w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height,
+ fmt->code, fmt->colorspace);
+
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static int hi846_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(&hi846->sd,
+ sd_state,
+ format->pad);
+ return 0;
+ }
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+ dev_dbg(&client->dev,
+ "Get format w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height, mf->code, mf->colorspace);
+
+ return 0;
+}
+
+static int hi846_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index > 0)
+ return -EINVAL;
+
+ code->code = HI846_MEDIA_BUS_FORMAT;
+
+ return 0;
+}
+
+static int hi846_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (fse->pad || fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != HI846_MEDIA_BUS_FORMAT) {
+ dev_err(&client->dev, "frame size enum not matching\n");
+ return -EINVAL;
+ }
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = supported_modes[fse->index].width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = supported_modes[fse->index].height;
+
+ dev_dbg(&client->dev, "%s: max width: %d max height: %d\n", __func__,
+ fse->max_width, fse->max_height);
+
+ return 0;
+}
+
+static int hi846_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ mutex_lock(&hi846->mutex);
+ switch (sel->which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ break;
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ sel->r = hi846->cur_mode->crop;
+ break;
+ }
+ mutex_unlock(&hi846->mutex);
+ return 0;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = 3264;
+ sel->r.height = 2448;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hi846_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi846_video_ops = {
+ .s_stream = hi846_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi846_pad_ops = {
+ .init_cfg = hi846_init_cfg,
+ .enum_frame_size = hi846_enum_frame_size,
+ .enum_mbus_code = hi846_enum_mbus_code,
+ .set_fmt = hi846_set_format,
+ .get_fmt = hi846_get_format,
+ .get_selection = hi846_get_selection,
+};
+
+static const struct v4l2_subdev_ops hi846_subdev_ops = {
+ .video = &hi846_video_ops,
+ .pad = &hi846_pad_ops,
+};
+
+static const struct media_entity_operations hi846_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int hi846_identify_module(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret;
+ u8 hi, lo;
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_L, &lo);
+ if (ret)
+ return ret;
+
+ if (lo != HI846_CHIP_ID_L) {
+ dev_err(&client->dev, "wrong chip id low byte: %x", lo);
+ return -ENXIO;
+ }
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_H, &hi);
+ if (ret)
+ return ret;
+
+ if (hi != HI846_CHIP_ID_H) {
+ dev_err(&client->dev, "wrong chip id high byte: %x", hi);
+ return -ENXIO;
+ }
+
+ dev_info(&client->dev, "chip id %02X %02X using %d mipi lanes\n",
+ hi, lo, hi846->nr_lanes);
+
+ return 0;
+}
+
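+/*
+ * Returns the first link frequency from hi846_link_freqs that is missing
+ * from the endpoint's link-frequencies property, or 0 if all of them are
+ * listed.
+ */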
+static s64 hi846_check_link_freqs(struct hi846 *hi846,
+ struct v4l2_fwnode_endpoint *ep)
+{
+ const s64 *freqs = hi846_link_freqs;
+ int freqs_count = ARRAY_SIZE(hi846_link_freqs);
+ int i, j;
+
+ for (i = 0; i < freqs_count; i++) {
+ for (j = 0; j < ep->nr_of_link_frequencies; j++)
+ if (freqs[i] == ep->link_frequencies[j])
+ break;
+ if (j == ep->nr_of_link_frequencies)
+ return freqs[i];
+ }
+
+ return 0;
+}
+
+static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+ s64 fq;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep) {
+ dev_err(dev, "unable to find endpoint node\n");
+ return -ENXIO;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node: %d\n", ret);
+ return ret;
+ }
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ hi846->nr_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ /* Check that link frequencies for all the modes are in device tree */
+ fq = hi846_check_link_freqs(hi846, &bus_cfg);
+ if (fq) {
+ dev_err(dev, "Link frequency of %lld is not supported\n", fq);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ hi846->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->rst_gpio)) {
+ dev_err(dev, "failed to get reset gpio: %pe\n",
+ hi846->rst_gpio);
+ return PTR_ERR(hi846->rst_gpio);
+ }
+
+ hi846->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->shutdown_gpio)) {
+ dev_err(dev, "failed to get shutdown gpio: %pe\n",
+ hi846->shutdown_gpio);
+ return PTR_ERR(hi846->shutdown_gpio);
+ }
+
+ return 0;
+}
+
+static int hi846_probe(struct i2c_client *client)
+{
+ struct hi846 *hi846;
+ int ret;
+ int i;
+ u32 mclk_freq;
+
+ hi846 = devm_kzalloc(&client->dev, sizeof(*hi846), GFP_KERNEL);
+ if (!hi846)
+ return -ENOMEM;
+
+ ret = hi846_parse_dt(hi846, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi846->clock = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock)) {
+ dev_err(&client->dev, "failed to get clock: %pe\n",
+ hi846->clock);
+ return PTR_ERR(hi846->clock);
+ }
+
+ mclk_freq = clk_get_rate(hi846->clock);
+ if (mclk_freq != 25000000)
+ dev_warn(&client->dev,
+ "External clock freq should be 25000000, not %u.\n",
+ mclk_freq);
+
+ for (i = 0; i < HI846_NUM_SUPPLIES; i++)
+ hi846->supplies[i].supply = hi846_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&client->dev, HI846_NUM_SUPPLIES,
+ hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ v4l2_i2c_subdev_init(&hi846->sd, client, &hi846_subdev_ops);
+
+ mutex_init(&hi846->mutex);
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ goto err_mutex;
+
+ ret = hi846_identify_module(hi846);
+ if (ret)
+ goto err_power_off;
+
+ hi846->cur_mode = &supported_modes[0];
+
+ ret = hi846_init_controls(hi846);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto err_power_off;
+ }
+
+ hi846->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi846->sd.entity.ops = &hi846_subdev_entity_ops;
+ hi846->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi846->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi846->sd.entity, 1, &hi846->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto err_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&hi846->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto err_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_media_entity_cleanup:
+ media_entity_cleanup(&hi846->sd.entity);
+
+err_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi846->sd.ctrl_handler);
+
+err_power_off:
+ hi846_power_off(hi846);
+
+err_mutex:
+ mutex_destroy(&hi846->mutex);
+
+ return ret;
+}
+
+static int hi846_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ hi846_suspend(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&hi846->mutex);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(hi846_pm_ops, hi846_suspend, hi846_resume, NULL);
+
+static const struct of_device_id hi846_of_match[] = {
+ { .compatible = "hynix,hi846", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi846_of_match);
+
+static struct i2c_driver hi846_i2c_driver = {
+ .driver = {
+ .name = "hi846",
+ .pm = &hi846_pm_ops,
+ .of_match_table = of_match_ptr(hi846_of_match),
+ },
+ .probe_new = hi846_probe,
+ .remove = hi846_remove,
+};
+
+module_i2c_driver(hi846_i2c_driver);
+
+MODULE_AUTHOR("Angus Ainslie <angus@akkea.ca>");
+MODULE_AUTHOR("Martin Kepplinger <martin.kepplinger@puri.sm>");
+MODULE_DESCRIPTION("Hynix HI846 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 81cdf37216ca..c249507aa2db 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1260,18 +1260,18 @@ static int imx258_probe(struct i2c_client *client)
return -ENOMEM;
imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(imx258->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ "error getting clock\n");
if (!imx258->clk) {
dev_dbg(&client->dev,
"no clock provided, using clock-frequency property\n");
device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != IMX258_INPUT_CLOCK_FREQ)
- return -EINVAL;
- } else if (IS_ERR(imx258->clk)) {
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
- "error getting clock\n");
+ } else {
+ val = clk_get_rate(imx258->clk);
}
- if (clk_get_rate(imx258->clk) != IMX258_INPUT_CLOCK_FREQ) {
+ if (val != IMX258_INPUT_CLOCK_FREQ) {
dev_err(&client->dev, "input clock frequency not supported\n");
return -EINVAL;
}
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 92376592455e..56674173524f 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -791,6 +791,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
RC_PROTO_BIT_RC6_6A_32;
ir_codes = RC_MAP_HAUPPAUGE;
+ ir->polling_interval = 125;
probe_tx = true;
break;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1aa2c58fd38c..7c663fd587bb 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -606,19 +606,18 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
if (!priv->nsources)
return 0;
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
struct max9286_asd *mas;
- mas = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
- source->fwnode,
- struct max9286_asd);
+ mas = v4l2_async_nf_add_fwnode(&priv->notifier, source->fwnode,
+ struct max9286_asd);
if (IS_ERR(mas)) {
dev_err(dev, "Failed to add subdev for source %u: %ld",
i, PTR_ERR(mas));
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(mas);
}
@@ -627,10 +626,10 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
priv->notifier.ops = &max9286_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&priv->sd, &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret) {
dev_err(dev, "Failed to register subdev_notifier");
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -642,8 +641,8 @@ static void max9286_v4l2_notifier_unregister(struct max9286_priv *priv)
if (!priv->nsources)
return;
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
}
static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 6eb88ef99783..cbce8b88dbcf 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -27,6 +27,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
@@ -75,38 +76,38 @@
#define MT9P031_PLL_CONFIG_1 0x11
#define MT9P031_PLL_CONFIG_2 0x12
#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
-#define MT9P031_PIXEL_CLOCK_INVERT (1 << 15)
+#define MT9P031_PIXEL_CLOCK_INVERT BIT(15)
#define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8)
#define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0)
-#define MT9P031_FRAME_RESTART 0x0b
+#define MT9P031_RESTART 0x0b
+#define MT9P031_FRAME_PAUSE_RESTART BIT(1)
+#define MT9P031_FRAME_RESTART BIT(0)
#define MT9P031_SHUTTER_DELAY 0x0c
#define MT9P031_RST 0x0d
-#define MT9P031_RST_ENABLE 1
-#define MT9P031_RST_DISABLE 0
+#define MT9P031_RST_ENABLE BIT(0)
#define MT9P031_READ_MODE_1 0x1e
#define MT9P031_READ_MODE_2 0x20
-#define MT9P031_READ_MODE_2_ROW_MIR (1 << 15)
-#define MT9P031_READ_MODE_2_COL_MIR (1 << 14)
-#define MT9P031_READ_MODE_2_ROW_BLC (1 << 6)
+#define MT9P031_READ_MODE_2_ROW_MIR BIT(15)
+#define MT9P031_READ_MODE_2_COL_MIR BIT(14)
+#define MT9P031_READ_MODE_2_ROW_BLC BIT(6)
#define MT9P031_ROW_ADDRESS_MODE 0x22
#define MT9P031_COLUMN_ADDRESS_MODE 0x23
#define MT9P031_GLOBAL_GAIN 0x35
#define MT9P031_GLOBAL_GAIN_MIN 8
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
-#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_GLOBAL_GAIN_MULT BIT(6)
#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
#define MT9P031_GREEN1_OFFSET 0x60
#define MT9P031_GREEN2_OFFSET 0x61
#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
-#define MT9P031_BLC_MANUAL_BLC (1 << 0)
+#define MT9P031_BLC_MANUAL_BLC BIT(0)
#define MT9P031_RED_OFFSET 0x63
#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
-#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
-#define MT9P031_TEST_PATTERN_DISABLE (0 << 0)
+#define MT9P031_TEST_PATTERN_ENABLE BIT(0)
#define MT9P031_TEST_PATTERN_GREEN 0xa1
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
@@ -196,7 +197,7 @@ static int mt9p031_reset(struct mt9p031 *mt9p031)
ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_ENABLE);
if (ret < 0)
return ret;
- ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_DISABLE);
+ ret = mt9p031_write(client, MT9P031_RST, 0);
if (ret < 0)
return ret;
@@ -229,6 +230,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
struct mt9p031_platform_data *pdata = mt9p031->pdata;
+ unsigned long ext_freq;
int ret;
mt9p031->clk = devm_clk_get(&client->dev, NULL);
@@ -239,13 +241,15 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
if (ret < 0)
return ret;
+ ext_freq = clk_get_rate(mt9p031->clk);
+
/* If the external clock frequency is out of bounds for the PLL use the
* pixel clock divider only and disable the PLL.
*/
- if (pdata->ext_freq > limits.ext_clock_max) {
+ if (ext_freq > limits.ext_clock_max) {
unsigned int div;
- div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
+ div = DIV_ROUND_UP(ext_freq, pdata->target_freq);
div = roundup_pow_of_two(div) / 2;
mt9p031->clk_div = min_t(unsigned int, div, 64);
@@ -254,7 +258,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
return 0;
}
- mt9p031->pll.ext_clock = pdata->ext_freq;
+ mt9p031->pll.ext_clock = ext_freq;
mt9p031->pll.pix_clock = pdata->target_freq;
mt9p031->use_pll = true;
@@ -369,6 +373,14 @@ static int __mt9p031_set_power(struct mt9p031 *mt9p031, bool on)
return ret;
}
+ /* Configure the pixel clock polarity */
+ if (mt9p031->pdata && mt9p031->pdata->pixclk_pol) {
+ ret = mt9p031_write(client, MT9P031_PIXEL_CLOCK_CONTROL,
+ MT9P031_PIXEL_CLOCK_INVERT);
+ if (ret < 0)
+ return ret;
+ }
+
return v4l2_ctrl_handler_setup(&mt9p031->ctrls);
}
@@ -444,9 +456,23 @@ static int mt9p031_set_params(struct mt9p031 *mt9p031)
static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int val;
int ret;
if (!enable) {
+ /* enable pause restart */
+ val = MT9P031_FRAME_PAUSE_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
+ /* enable restart + keep pause restart set */
+ val |= MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
/* Stop sensor readout */
ret = mt9p031_set_output_control(mt9p031,
MT9P031_OUTPUT_CONTROL_CEN, 0);
@@ -466,6 +492,16 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
if (ret < 0)
return ret;
+ /*
+ * - clear pause restart
+ * - don't clear restart as clearing restart manually can cause
+ * undefined behavior
+ */
+ val = MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
return mt9p031_pll_enable(mt9p031);
}
@@ -756,8 +792,7 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret < 0)
return ret;
- return mt9p031_write(client, MT9P031_TEST_PATTERN,
- MT9P031_TEST_PATTERN_DISABLE);
+ return mt9p031_write(client, MT9P031_TEST_PATTERN, 0);
}
ret = mt9p031_write(client, MT9P031_TEST_PATTERN_GREEN, 0x05a0);
@@ -1011,8 +1046,11 @@ static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
static struct mt9p031_platform_data *
mt9p031_get_pdata(struct i2c_client *client)
{
- struct mt9p031_platform_data *pdata;
+ struct mt9p031_platform_data *pdata = NULL;
struct device_node *np;
+ struct v4l2_fwnode_endpoint endpoint = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
@@ -1021,6 +1059,9 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!np)
return NULL;
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
+ goto done;
+
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto done;
@@ -1028,6 +1069,9 @@ mt9p031_get_pdata(struct i2c_client *client)
of_property_read_u32(np, "input-clock-frequency", &pdata->ext_freq);
of_property_read_u32(np, "pixel-clock-frequency", &pdata->target_freq);
+ pdata->pixclk_pol = !!(endpoint.bus.parallel.flags &
+ V4L2_MBUS_PCLK_SAMPLE_RISING);
+
done:
of_node_put(np);
return pdata;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7fc70af53e45..b4d22f5d9933 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV13858_REG_VALUE_08BIT 1
@@ -1553,6 +1554,12 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return 0;
}
+static const struct v4l2_subdev_core_ops ov13858_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov13858_video_ops = {
.s_stream = ov13858_set_stream,
};
@@ -1569,6 +1576,7 @@ static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = {
};
static const struct v4l2_subdev_ops ov13858_subdev_ops = {
+ .core = &ov13858_core_ops,
.video = &ov13858_video_ops,
.pad = &ov13858_pad_ops,
.sensor = &ov13858_sensor_ops,
@@ -1724,7 +1732,8 @@ static int ov13858_probe(struct i2c_client *client,
/* Initialize subdev */
ov13858->sd.internal_ops = &ov13858_internal_ops;
- ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov13858->sd.entity.ops = &ov13858_subdev_entity_ops;
ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
new file mode 100644
index 000000000000..7caeae641051
--- /dev/null
+++ b/drivers/media/i2c/ov13b10.c
@@ -0,0 +1,1491 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV13B10_REG_VALUE_08BIT 1
+#define OV13B10_REG_VALUE_16BIT 2
+#define OV13B10_REG_VALUE_24BIT 3
+
+#define OV13B10_REG_MODE_SELECT 0x0100
+#define OV13B10_MODE_STANDBY 0x00
+#define OV13B10_MODE_STREAMING 0x01
+
+#define OV13B10_REG_SOFTWARE_RST 0x0103
+#define OV13B10_SOFTWARE_RST 0x01
+
+/* Chip ID */
+#define OV13B10_REG_CHIP_ID 0x300a
+#define OV13B10_CHIP_ID 0x560d42
+
+/* V_TIMING internal */
+#define OV13B10_REG_VTS 0x380e
+#define OV13B10_VTS_30FPS 0x0c7c
+#define OV13B10_VTS_60FPS 0x063e
+#define OV13B10_VTS_MAX 0x7fff
+
+/* HBLANK control - read only */
+#define OV13B10_PPL_560MHZ 4704
+
+/* Exposure control */
+#define OV13B10_REG_EXPOSURE 0x3500
+#define OV13B10_EXPOSURE_MIN 4
+#define OV13B10_EXPOSURE_STEP 1
+#define OV13B10_EXPOSURE_DEFAULT 0x40
+
+/* Analog gain control */
+#define OV13B10_REG_ANALOG_GAIN 0x3508
+#define OV13B10_ANA_GAIN_MIN 0x80
+#define OV13B10_ANA_GAIN_MAX 0x07c0
+#define OV13B10_ANA_GAIN_STEP 1
+#define OV13B10_ANA_GAIN_DEFAULT 0x80
+
+/* Digital gain control */
+#define OV13B10_REG_DGTL_GAIN_H 0x350a
+#define OV13B10_REG_DGTL_GAIN_M 0x350b
+#define OV13B10_REG_DGTL_GAIN_L 0x350c
+
+#define OV13B10_DGTL_GAIN_MIN 1024 /* Min = 1 X */
+#define OV13B10_DGTL_GAIN_MAX (4096 - 1) /* Max = 4 X */
+#define OV13B10_DGTL_GAIN_DEFAULT 2560 /* Default gain = 2.5 X */
+#define OV13B10_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */
+
+#define OV13B10_DGTL_GAIN_L_SHIFT 6
+#define OV13B10_DGTL_GAIN_L_MASK 0x3
+#define OV13B10_DGTL_GAIN_M_SHIFT 2
+#define OV13B10_DGTL_GAIN_M_MASK 0xff
+#define OV13B10_DGTL_GAIN_H_SHIFT 10
+#define OV13B10_DGTL_GAIN_H_MASK 0x3
+
+/* Test Pattern Control */
+#define OV13B10_REG_TEST_PATTERN 0x5080
+#define OV13B10_TEST_PATTERN_ENABLE BIT(7)
+#define OV13B10_TEST_PATTERN_MASK 0xf3
+#define OV13B10_TEST_PATTERN_BAR_SHIFT 2
+
+/* Flip Control */
+#define OV13B10_REG_FORMAT1 0x3820
+#define OV13B10_REG_FORMAT2 0x3821
+
+/* Horizontal Window Offset */
+#define OV13B10_REG_H_WIN_OFFSET 0x3811
+
+/* Vertical Window Offset */
+#define OV13B10_REG_V_WIN_OFFSET 0x3813
+
+struct ov13b10_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov13b10_reg_list {
+ u32 num_of_regs;
+ const struct ov13b10_reg *regs;
+};
+
+/* Link frequency config */
+struct ov13b10_link_freq_config {
+ u32 pixels_per_line;
+
+ /* registers for this link frequency */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* Mode: resolution and related configuration values */
+struct ov13b10_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts_def;
+ u32 vts_min;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* 4208x3120 needs 1120Mbps/lane, 4 lanes */
+static const struct ov13b10_reg mipi_data_rate_1120mbps[] = {
+ {0x0103, 0x01},
+ {0x0303, 0x04},
+ {0x0305, 0xaf},
+ {0x0321, 0x00},
+ {0x0323, 0x04},
+ {0x0324, 0x01},
+ {0x0325, 0xa4},
+ {0x0326, 0x81},
+ {0x0327, 0x04},
+ {0x3012, 0x07},
+ {0x3013, 0x32},
+ {0x3107, 0x23},
+ {0x3501, 0x0c},
+ {0x3502, 0x10},
+ {0x3504, 0x08},
+ {0x3508, 0x07},
+ {0x3509, 0xc0},
+ {0x3600, 0x16},
+ {0x3601, 0x54},
+ {0x3612, 0x4e},
+ {0x3620, 0x00},
+ {0x3621, 0x68},
+ {0x3622, 0x66},
+ {0x3623, 0x03},
+ {0x3662, 0x92},
+ {0x3666, 0xbb},
+ {0x3667, 0x44},
+ {0x366e, 0xff},
+ {0x366f, 0xf3},
+ {0x3675, 0x44},
+ {0x3676, 0x00},
+ {0x367f, 0xe9},
+ {0x3681, 0x32},
+ {0x3682, 0x1f},
+ {0x3683, 0x0b},
+ {0x3684, 0x0b},
+ {0x3704, 0x0f},
+ {0x3706, 0x40},
+ {0x3708, 0x3b},
+ {0x3709, 0x72},
+ {0x370b, 0xa2},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3725, 0x42},
+ {0x3739, 0x12},
+ {0x3767, 0x00},
+ {0x377a, 0x0d},
+ {0x3789, 0x18},
+ {0x3790, 0x40},
+ {0x3791, 0xa2},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37d9, 0x0c},
+ {0x37da, 0x02},
+ {0x37dc, 0x02},
+ {0x37e1, 0x04},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3811, 0x0f},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x381f, 0x08},
+ {0x3820, 0x88},
+ {0x3821, 0x00},
+ {0x3822, 0x14},
+ {0x382e, 0xe6},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3ca0, 0x00},
+ {0x3ca1, 0x00},
+ {0x3ca2, 0x00},
+ {0x3ca3, 0x00},
+ {0x3ca4, 0x50},
+ {0x3ca5, 0x11},
+ {0x3ca6, 0x01},
+ {0x3ca7, 0x00},
+ {0x3ca8, 0x00},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x400a, 0x01},
+ {0x400b, 0x19},
+ {0x4011, 0x21},
+ {0x4017, 0x08},
+ {0x4019, 0x04},
+ {0x401a, 0x58},
+ {0x4032, 0x1e},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x405e, 0x00},
+ {0x4066, 0x02},
+ {0x4501, 0x00},
+ {0x4502, 0x10},
+ {0x4505, 0x00},
+ {0x4800, 0x64},
+ {0x481b, 0x3e},
+ {0x481f, 0x30},
+ {0x4825, 0x34},
+ {0x4837, 0x0e},
+ {0x484b, 0x01},
+ {0x4883, 0x02},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+ {0x5045, 0x20},
+ {0x5046, 0x20},
+ {0x5047, 0xa4},
+ {0x5048, 0x20},
+ {0x5049, 0xa4},
+ {0x0100, 0x01},
+};
+
+static const struct ov13b10_reg mode_4208x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x0f},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x2340_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x09},
+ {0x380b, 0x24},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x01},
+ {0x3813, 0x8f},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_2104x1560_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x38},
+ {0x380a, 0x06},
+ {0x380b, 0x18},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x07},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const struct ov13b10_reg mode_2080x1170_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x20},
+ {0x380a, 0x04},
+ {0x380b, 0x92},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x13},
+ {0x3812, 0x00},
+ {0x3813, 0xc9},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const char * const ov13b10_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bar Type 1",
+ "Vertical Color Bar Type 2",
+ "Vertical Color Bar Type 3",
+ "Vertical Color Bar Type 4"
+};
+
+/* Configurations for supported link frequencies */
+#define OV13B10_LINK_FREQ_560MHZ 560000000ULL
+#define OV13B10_LINK_FREQ_INDEX_0 0
+
+#define OV13B10_EXT_CLK 19200000
+#define OV13B10_DATA_LANES 4
+
+/*
+ * pixel_rate = link_freq * data_rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per sample => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * OV13B10_DATA_LANES;
+ do_div(f, 10);
+
+ return f;
+}
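+
+/*
+ * Illustrative note (editorial, not part of the submitted patch): with the
+ * single 560 MHz link frequency used by this driver, this evaluates to
+ * 560000000 * 2 * 4 / 10 = 448000000 pixels per second, the value later
+ * exposed through V4L2_CID_PIXEL_RATE.
+ */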
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ OV13B10_LINK_FREQ_560MHZ
+};
+
+/* Link frequency configs */
+static const struct ov13b10_link_freq_config
+ link_freq_configs[] = {
+ {
+ .pixels_per_line = OV13B10_PPL_560MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1120mbps),
+ .regs = mipi_data_rate_1120mbps,
+ }
+ }
+};
+
+/* Mode configs */
+static const struct ov13b10_mode supported_modes[] = {
+ {
+ .width = 4208,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4208x3120_regs),
+ .regs = mode_4208x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x3120_regs),
+ .regs = mode_4160x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 2340,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x2340_regs),
+ .regs = mode_4160x2340_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2104,
+ .height = 1560,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2104x1560_regs),
+ .regs = mode_2104x1560_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2080,
+ .height = 1170,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2080x1170_regs),
+ .regs = mode_2080x1170_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ }
+};
+
+struct ov13b10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov13b10_mode *cur_mode;
+
+ /* Mutex for serialized access */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+#define to_ov13b10(_sd) container_of(_sd, struct ov13b10, sd)
+
+/* Read registers up to 4 at a time */
+static int ov13b10_read_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct i2c_msg msgs[2];
+ u8 *data_be_p;
+ int ret;
+ __be32 data_be = 0;
+ __be16 reg_addr_be = cpu_to_be16(reg);
+
+ if (len > 4)
+ return -EINVAL;
+
+ data_be_p = (u8 *)&data_be;
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *)&reg_addr_be;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_be_p[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = be32_to_cpu(data_be);
+
+ return 0;
+}
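+
+/*
+ * Illustrative note (editorial): for a 3-byte read of OV13B10_REG_CHIP_ID
+ * (0x300a) the received bytes land in data_be_p[1..3], so be32_to_cpu()
+ * returns 0x00560d42, which is what ov13b10_identify_module() compares
+ * against OV13B10_CHIP_ID.
+ */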
+
+/* Write registers up to 4 at a time */
+static int ov13b10_write_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 __val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int buf_i, val_i;
+ u8 buf[6], *val_p;
+ __be32 val;
+
+ if (len > 4)
+ return -EINVAL;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ val = cpu_to_be32(__val);
+ val_p = (u8 *)&val;
+ buf_i = 2;
+ val_i = 4 - len;
+
+ while (val_i < 4)
+ buf[buf_i++] = val_p[val_i++];
+
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int ov13b10_write_regs(struct ov13b10 *ov13b,
+ const struct ov13b10_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov13b10_write_reg_list(struct ov13b10 *ov13b,
+ const struct ov13b10_reg_list *r_list)
+{
+ return ov13b10_write_regs(ov13b, r_list->regs, r_list->num_of_regs);
+}
+
+/* Open sub-device */
+static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ const struct ov13b10_mode *default_mode = &supported_modes[0];
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
+
+ mutex_lock(&ov13b->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = default_mode->width;
+ try_fmt->height = default_mode->height;
+ try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ /* No crop or compose */
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
+
+static int ov13b10_update_digital_gain(struct ov13b10 *ov13b, u32 d_gain)
+{
+ int ret;
+ u32 val;
+
+ /*
+ * The 12-bit digital gain (in 1/1024 steps) is split across three
+ * registers, least significant bits first:
+ * 0x350C[7:6] = d_gain[1:0], 0x350B[7:0] = d_gain[9:2],
+ * 0x350A[1:0] = d_gain[11:10].
+ */
+
+ val = (d_gain & OV13B10_DGTL_GAIN_L_MASK) << OV13B10_DGTL_GAIN_L_SHIFT;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_L,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_M_SHIFT) & OV13B10_DGTL_GAIN_M_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_M,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_H_SHIFT) & OV13B10_DGTL_GAIN_H_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_H,
+ OV13B10_REG_VALUE_08BIT, val);
+
+ return ret;
+}
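+
+/*
+ * Illustrative note (editorial): for the default gain of 2560 (2.5x) the
+ * split above writes 0x350a = 0x02, 0x350b = 0x80 and 0x350c = 0x00.
+ */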
+
+static int ov13b10_enable_test_pattern(struct ov13b10 *ov13b, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ if (pattern) {
+ val &= OV13B10_TEST_PATTERN_MASK;
+ val |= ((pattern - 1) << OV13B10_TEST_PATTERN_BAR_SHIFT) |
+ OV13B10_TEST_PATTERN_ENABLE;
+ } else {
+ val &= ~OV13B10_TEST_PATTERN_ENABLE;
+ }
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, val);
+}
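+
+/*
+ * Illustrative note (editorial): selecting menu entry 2 ("Vertical Color
+ * Bar Type 2") keeps the other bits read back from the register, clears
+ * bits [3:2] and ORs in ((2 - 1) << 2) | BIT(7) = 0x84.
+ */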
+
+static int ov13b10_set_ctrl_hflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val & ~BIT(3) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Apply a one-pixel cropping offset so the Bayer order stays the
+ * same after mirroring the image.
+ */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? ++val : val);
+}
+
+static int ov13b10_set_ctrl_vflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val | BIT(4) | BIT(5) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Apply a one-line cropping offset so the Bayer order stays the
+ * same after flipping the image.
+ */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? --val : val);
+}
+
+static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov13b10 *ov13b = container_of(ctrl->handler,
+ struct ov13b10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = ov13b->cur_mode->height + ctrl->val - 8;
+ __v4l2_ctrl_modify_range(ov13b->exposure,
+ ov13b->exposure->minimum,
+ max, ov13b->exposure->step, max);
+ break;
+ }
+
+ /*
+ * V4L2 control values are only applied to the hardware when the
+ * sensor is powered up for streaming.
+ */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ ret = 0;
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_ANALOG_GAIN,
+ OV13B10_REG_VALUE_16BIT,
+ ctrl->val << 1);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov13b10_update_digital_gain(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_EXPOSURE,
+ OV13B10_REG_VALUE_24BIT,
+ ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_VTS,
+ OV13B10_REG_VALUE_16BIT,
+ ov13b->cur_mode->height
+ + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov13b10_enable_test_pattern(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ov13b10_set_ctrl_hflip(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
+ break;
+ default:
+ dev_info(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov13b10_ctrl_ops = {
+ .s_ctrl = ov13b10_set_ctrl,
+};
+
+static int ov13b10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* Only one Bayer order (GRBG) is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov13b10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void ov13b10_update_pad_format(const struct ov13b10_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int ov13b10_do_get_pad_format(struct ov13b10 *ov13b,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &ov13b->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ ov13b10_update_pad_format(ov13b->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int ov13b10_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ mutex_lock(&ov13b->mutex);
+ ret = ov13b10_do_get_pad_format(ov13b, sd_state, fmt);
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int
+ov13b10_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
+
+ mutex_lock(&ov13b->mutex);
+
+ /* Only one raw Bayer (GRBG) order is supported */
+ if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ ov13b10_update_pad_format(mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ ov13b->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov13b->link_freq, mode->link_freq_index);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13b->pixel_rate, pixel_rate);
+
+ /* Update limits and set FPS to default */
+ vblank_def = ov13b->cur_mode->vts_def -
+ ov13b->cur_mode->height;
+ vblank_min = ov13b->cur_mode->vts_min -
+ ov13b->cur_mode->height;
+ __v4l2_ctrl_modify_range(ov13b->vblank, vblank_min,
+ OV13B10_VTS_MAX
+ - ov13b->cur_mode->height,
+ 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov13b->vblank, vblank_def);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - ov13b->cur_mode->width;
+ __v4l2_ctrl_modify_range(ov13b->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
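+
+/*
+ * Illustrative note (editorial): for the full 4208x3120 mode the defaults
+ * above give vblank = 0x0c7c - 3120 = 76 lines and hblank = 4704 - 4208 =
+ * 496 pixels, derived from OV13B10_VTS_30FPS and OV13B10_PPL_560MHZ.
+ */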
+
+static int ov13b10_start_streaming(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ const struct ov13b10_reg_list *reg_list;
+ int ret, link_freq_index;
+
+ /* Get out of software reset */
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
+ OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set powerup registers\n",
+ __func__);
+ return ret;
+ }
+
+ link_freq_index = ov13b->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &ov13b->cur_mode->reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov13b->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT,
+ OV13B10_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
+{
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT, OV13B10_MODE_STANDBY);
+}
+
+static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&ov13b->mutex);
+ if (ov13b->streaming == enable) {
+ mutex_unlock(&ov13b->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ ov13b10_stop_streaming(ov13b);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov13b->streaming = enable;
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov13b10_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ if (ov13b->streaming)
+ ov13b10_stop_streaming(ov13b);
+
+ return 0;
+}
+
+static int __maybe_unused ov13b10_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ if (ov13b->streaming) {
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ ov13b10_stop_streaming(ov13b);
+ ov13b->streaming = false;
+ return ret;
+}
+
+/* Verify chip ID */
+static int ov13b10_identify_module(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
+ OV13B10_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV13B10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV13B10_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
+ .s_stream = ov13b10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov13b10_pad_ops = {
+ .enum_mbus_code = ov13b10_enum_mbus_code,
+ .get_fmt = ov13b10_get_pad_format,
+ .set_fmt = ov13b10_set_pad_format,
+ .enum_frame_size = ov13b10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov13b10_subdev_ops = {
+ .video = &ov13b10_video_ops,
+ .pad = &ov13b10_pad_ops,
+};
+
+static const struct media_entity_operations ov13b10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
+ .open = ov13b10_open,
+};
+
+/* Initialize control handlers */
+static int ov13b10_init_controls(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13b10_mode *mode;
+ u32 max;
+ int ret;
+
+ ctrl_hdlr = &ov13b->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ mutex_init(&ov13b->mutex);
+ ctrl_hdlr->lock = &ov13b->mutex;
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ ov13b->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov13b10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ max,
+ 0,
+ link_freq_menu_items);
+ if (ov13b->link_freq)
+ ov13b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = 0;
+ /* By default, PIXEL_RATE is read only */
+ ov13b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
+
+ mode = ov13b->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
+ ov13b->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VBLANK,
+ vblank_min,
+ OV13B10_VTS_MAX - mode->height, 1,
+ vblank_def);
+
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
+ ov13b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HBLANK,
+ hblank, hblank, 1, hblank);
+ if (ov13b->hblank)
+ ov13b->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = mode->vts_def - 8;
+ ov13b->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV13B10_EXPOSURE_MIN,
+ exposure_max, OV13B10_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV13B10_ANA_GAIN_MIN, OV13B10_ANA_GAIN_MAX,
+ OV13B10_ANA_GAIN_STEP, OV13B10_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV13B10_DGTL_GAIN_MIN, OV13B10_DGTL_GAIN_MAX,
+ OV13B10_DGTL_GAIN_STEP, OV13B10_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov13b10_test_pattern_menu) - 1,
+ 0, 0, ov13b10_test_pattern_menu);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13b10_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ ov13b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&ov13b->mutex);
+
+ return ret;
+}
+
+static void ov13b10_free_controls(struct ov13b10 *ov13b)
+{
+ v4l2_ctrl_handler_free(ov13b->sd.ctrl_handler);
+ mutex_destroy(&ov13b->mutex);
+}
+
+static int ov13b10_check_hwcfg(struct device *dev)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int i, j;
+ int ret;
+ u32 ext_clk;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (ext_clk != OV13B10_EXT_CLK) {
+ dev_err(dev, "external clock %d is not supported",
+ ext_clk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV13B10_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto out_err;
+ }
+ }
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
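+
+/*
+ * Illustrative note (editorial): given the tables above, the firmware
+ * endpoint must declare four data lanes, a 19.2 MHz external clock and a
+ * link-frequencies list containing 560000000, otherwise probe is rejected
+ * here.
+ */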
+
+static int ov13b10_probe(struct i2c_client *client)
+{
+ struct ov13b10 *ov13b;
+ int ret;
+
+ /* Check HW config */
+ ret = ov13b10_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ return ret;
+ }
+
+ ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
+ if (!ov13b)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+
+ /* Check module identity */
+ ret = ov13b10_identify_module(ov13b);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* Set default mode to max resolution */
+ ov13b->cur_mode = &supported_modes[0];
+
+ ret = ov13b10_init_controls(ov13b);
+ if (ret)
+ return ret;
+
+ /* Initialize subdev */
+ ov13b->sd.internal_ops = &ov13b10_internal_ops;
+ ov13b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13b->sd.entity.ops = &ov13b10_subdev_entity_ops;
+ ov13b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&ov13b->sd.entity);
+
+error_handler_free:
+ ov13b10_free_controls(ov13b);
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int ov13b10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov13b10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov13b10_suspend, ov13b10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov13b10_acpi_ids[] = {
+ {"OVTIDB10"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov13b10_acpi_ids);
+#endif
+
+static struct i2c_driver ov13b10_i2c_driver = {
+ .driver = {
+ .name = "ov13b10",
+ .pm = &ov13b10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov13b10_acpi_ids),
+ },
+ .probe_new = ov13b10_probe,
+ .remove = ov13b10_remove,
+};
+
+module_i2c_driver(ov13b10_i2c_driver);
+
+MODULE_AUTHOR("Kao, Arec <arec.kao@intel.com>");
+MODULE_DESCRIPTION("Omnivision ov13b10 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 49189926afd6..251f459ab484 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV5670_REG_CHIP_ID 0x300a
@@ -2420,6 +2421,12 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return 0;
}
+static const struct v4l2_subdev_core_ops ov5670_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov5670_video_ops = {
.s_stream = ov5670_set_stream,
};
@@ -2436,6 +2443,7 @@ static const struct v4l2_subdev_sensor_ops ov5670_sensor_ops = {
};
static const struct v4l2_subdev_ops ov5670_subdev_ops = {
+ .core = &ov5670_core_ops,
.video = &ov5670_video_ops,
.pad = &ov5670_pad_ops,
.sensor = &ov5670_sensor_ops,
@@ -2489,7 +2497,8 @@ static int ov5670_probe(struct i2c_client *client)
}
ov5670->sd.internal_ops = &ov5670_internal_ops;
- ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov5670->sd.entity.ops = &ov5670_subdev_entity_ops;
ov5670->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index aa74744b91c7..c6c6050cda1a 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -107,6 +107,11 @@ static const char * const ov8856_supply_names[] = {
"dvdd", /* Digital core power */
};
+enum {
+ OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
+ OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
+};
+
struct ov8856_reg {
u16 address;
u8 val;
@@ -145,6 +150,9 @@ struct ov8856_mode {
/* Number of data lanes */
u8 data_lanes;
+
+ /* Default MEDIA_BUS_FMT for this mode */
+ u32 default_mbus_index;
};
struct ov8856_mipi_data_rates {
@@ -1055,7 +1063,7 @@ static const struct ov8856_reg lane_4_mode_3264x2448[] = {
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1259,7 +1267,7 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x3810, 0x00},
{0x3811, 0x02},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1372,6 +1380,19 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x5e10, 0xfc}
};
+static const struct ov8856_reg mipi_data_mbus_sbggr10_1x10[] = {
+ {0x3813, 0x02},
+};
+
+static const struct ov8856_reg mipi_data_mbus_sgrbg10_1x10[] = {
+ {0x3813, 0x01},
+};
+
+static const u32 ov8856_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10
+};
+
static const char * const ov8856_test_pattern_menu[] = {
"Disabled",
"Standard Color Bar",
@@ -1380,6 +1401,17 @@ static const char * const ov8856_test_pattern_menu[] = {
"Bottom-Top Darker Color Bar"
};
+static const struct ov8856_reg_list bayer_offset_configs[] = {
+ [OV8856_MEDIA_BUS_FMT_SBGGR10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sbggr10_1x10),
+ .regs = mipi_data_mbus_sbggr10_1x10,
+ },
+ [OV8856_MEDIA_BUS_FMT_SGRBG10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sgrbg10_1x10),
+ .regs = mipi_data_mbus_sgrbg10_1x10,
+ }
+};
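+
+/*
+ * Illustrative note (editorial): the two entries above differ only in
+ * register 0x3813 (0x02 vs. 0x01), apparently a one-line shift of the
+ * readout window that flips between the SBGGR10 and SGRBG10 Bayer phases;
+ * ov8856_start_streaming() applies the entry selected by cur_mbus_index.
+ */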
+
struct ov8856 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1399,6 +1431,9 @@ struct ov8856 {
/* Current mode */
const struct ov8856_mode *cur_mode;
+ /* Application specified mbus format */
+ u32 cur_mbus_index;
+
/* To serialize asynchronous callbacks */
struct mutex mutex;
@@ -1450,6 +1485,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 0,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1464,6 +1500,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 1,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
}}
};
@@ -1499,6 +1536,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1513,6 +1551,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 3264,
@@ -1527,6 +1566,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
},
{
.width = 1632,
@@ -1541,6 +1581,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
}}
};
@@ -1904,12 +1945,21 @@ static int ov8856_init_controls(struct ov8856 *ov8856)
return 0;
}
-static void ov8856_update_pad_format(const struct ov8856_mode *mode,
+static void ov8856_update_pad_format(struct ov8856 *ov8856,
+ const struct ov8856_mode *mode,
struct v4l2_mbus_framefmt *fmt)
{
+ int index;
+
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (ov8856_mbus_codes[index] == fmt->code)
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
+ index = mode->default_mbus_index;
+ fmt->code = ov8856_mbus_codes[index];
+ ov8856->cur_mbus_index = index;
fmt->field = V4L2_FIELD_NONE;
}
@@ -1935,6 +1985,13 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
return ret;
}
+ reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
+ ret = ov8856_write_reg_list(ov8856, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mbus format");
+ return ret;
+ }
+
ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler);
if (ret)
return ret;
@@ -2096,7 +2153,7 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
fmt->format.height);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
@@ -2140,7 +2197,7 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
sd_state,
fmt->pad);
else
- ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, ov8856->cur_mode, &fmt->format);
mutex_unlock(&ov8856->mutex);
@@ -2151,11 +2208,10 @@ static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* Only one bayer order GRBG is supported */
- if (code->index > 0)
+ if (code->index >= ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = ov8856_mbus_codes[code->index];
return 0;
}
@@ -2165,11 +2221,15 @@ static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ov8856 *ov8856 = to_ov8856(sd);
+ int index;
if (fse->index >= ov8856->modes_size)
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (fse->code == ov8856_mbus_codes[index])
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
fse->min_width = ov8856->priv_lane->supported_modes[fse->index].width;
@@ -2185,7 +2245,7 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(&ov8856->priv_lane->supported_modes[0],
+ ov8856_update_pad_format(ov8856, &ov8856->priv_lane->supported_modes[0],
v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov8856->mutex);
@@ -2426,6 +2486,7 @@ static int ov8856_probe(struct i2c_client *client)
mutex_init(&ov8856->mutex);
ov8856->cur_mode = &ov8856->priv_lane->supported_modes[0];
+ ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
dev_err(&client->dev, "failed to init controls: %d", ret);
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f630b88cbfaa..ef976d085d72 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -876,11 +876,10 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
bridge->rx = ep;
/* register async notifier so we get notified when sensor is connected */
- v4l2_async_notifier_init(&bridge->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &bridge->notifier,
- of_fwnode_handle(ep_node),
- struct v4l2_async_subdev);
+ v4l2_async_nf_init(&bridge->notifier);
+ asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier,
+ of_fwnode_handle(ep_node),
+ struct v4l2_async_subdev);
of_node_put(ep_node);
if (IS_ERR(asd)) {
@@ -890,10 +889,9 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
}
bridge->notifier.ops = &mipid02_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&bridge->sd,
- &bridge->notifier);
+ ret = v4l2_async_subdev_nf_register(&bridge->sd, &bridge->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
return ret;
@@ -1031,8 +1029,8 @@ static int mipid02_probe(struct i2c_client *client)
return 0;
unregister_notifier:
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
power_off:
mipid02_set_power_off(bridge);
entity_cleanup:
@@ -1048,8 +1046,8 @@ static int mipid02_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mipid02_dev *bridge = to_mipid02_dev(sd);
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
v4l2_async_unregister_subdev(&bridge->sd);
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 6070aaf0b32e..8fafce26d62f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1092,67 +1092,82 @@ tda1997x_detect_std(struct tda1997x_state *state,
struct v4l2_dv_timings *timings)
{
struct v4l2_subdev *sd = &state->sd;
- u32 vper;
- u16 hper;
- u16 hsper;
- int i;
/*
* Read the FMT registers
- * REG_V_PER: Period of a frame (or two fields) in MCLK(27MHz) cycles
- * REG_H_PER: Period of a line in MCLK(27MHz) cycles
- * REG_HS_WIDTH: Period of horiz sync pulse in MCLK(27MHz) cycles
+ * REG_V_PER: Period of a frame (or field) in MCLK (27MHz) cycles
+ * REG_H_PER: Period of a line in MCLK (27MHz) cycles
+ * REG_HS_WIDTH: Period of horiz sync pulse in MCLK (27MHz) cycles
*/
- vper = io_read24(sd, REG_V_PER) & MASK_VPER;
- hper = io_read16(sd, REG_H_PER) & MASK_HPER;
- hsper = io_read16(sd, REG_HS_WIDTH) & MASK_HSWIDTH;
- v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+ u32 vper, vsync_pos;
+ u16 hper, hsync_pos, hsper, interlaced;
+ u16 htot, hact, hfront, hsync, hback;
+ u16 vtot, vact, vfront1, vfront2, vsync, vback1, vback2;
if (!state->input_detect[0] && !state->input_detect[1])
return -ENOLINK;
- for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
- const struct v4l2_bt_timings *bt;
- u32 lines, width, _hper, _hsper;
- u32 vmin, vmax, hmin, hmax, hsmin, hsmax;
- bool vmatch, hmatch, hsmatch;
-
- bt = &v4l2_dv_timings_presets[i].bt;
- width = V4L2_DV_BT_FRAME_WIDTH(bt);
- lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
- _hper = (u32)bt->pixelclock / width;
- if (bt->interlaced)
- lines /= 2;
- /* vper +/- 0.7% */
- vmin = ((27000000 / 1000) * 993) / _hper * lines;
- vmax = ((27000000 / 1000) * 1007) / _hper * lines;
- /* hper +/- 1.0% */
- hmin = ((27000000 / 100) * 99) / _hper;
- hmax = ((27000000 / 100) * 101) / _hper;
- /* hsper +/- 2 (take care to avoid 32bit overflow) */
- _hsper = 27000 * bt->hsync / ((u32)bt->pixelclock/1000);
- hsmin = _hsper - 2;
- hsmax = _hsper + 2;
-
- /* vmatch matches the framerate */
- vmatch = ((vper <= vmax) && (vper >= vmin)) ? 1 : 0;
- /* hmatch matches the width */
- hmatch = ((hper <= hmax) && (hper >= hmin)) ? 1 : 0;
- /* hsmatch matches the hswidth */
- hsmatch = ((hsper <= hsmax) && (hsper >= hsmin)) ? 1 : 0;
- if (hmatch && vmatch && hsmatch) {
- v4l2_print_dv_timings(sd->name, "Detected format: ",
- &v4l2_dv_timings_presets[i],
- false);
- if (timings)
- *timings = v4l2_dv_timings_presets[i];
- return 0;
- }
- }
+ vper = io_read24(sd, REG_V_PER);
+ hper = io_read16(sd, REG_H_PER);
+ hsper = io_read16(sd, REG_HS_WIDTH);
+ vsync_pos = vper & MASK_VPER_SYNC_POS;
+ hsync_pos = hper & MASK_HPER_SYNC_POS;
+ interlaced = hsper & MASK_HSWIDTH_INTERLACED;
+ vper &= MASK_VPER;
+ hper &= MASK_HPER;
+ hsper &= MASK_HSWIDTH;
+ v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+
+ htot = io_read16(sd, REG_FMT_H_TOT);
+ hact = io_read16(sd, REG_FMT_H_ACT);
+ hfront = io_read16(sd, REG_FMT_H_FRONT);
+ hsync = io_read16(sd, REG_FMT_H_SYNC);
+ hback = io_read16(sd, REG_FMT_H_BACK);
+
+ vtot = io_read16(sd, REG_FMT_V_TOT);
+ vact = io_read16(sd, REG_FMT_V_ACT);
+ vfront1 = io_read(sd, REG_FMT_V_FRONT_F1);
+ vfront2 = io_read(sd, REG_FMT_V_FRONT_F2);
+ vsync = io_read(sd, REG_FMT_V_SYNC);
+ vback1 = io_read(sd, REG_FMT_V_BACK_F1);
+ vback2 = io_read(sd, REG_FMT_V_BACK_F2);
+
+ v4l2_dbg(1, debug, sd, "Geometry: H %u %u %u %u %u Sync%c V %u %u %u %u %u %u %u Sync%c\n",
+ htot, hact, hfront, hsync, hback, hsync_pos ? '+' : '-',
+ vtot, vact, vfront1, vfront2, vsync, vback1, vback2, vsync_pos ? '+' : '-');
+
+ if (!timings)
+ return 0;
- v4l_err(state->client, "no resolution match for timings: %d/%d/%d\n",
- vper, hper, hsper);
- return -ERANGE;
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = hact;
+ timings->bt.hfrontporch = hfront;
+ timings->bt.hsync = hsync;
+ timings->bt.hbackporch = hback;
+ timings->bt.height = vact;
+ timings->bt.vfrontporch = vfront1;
+ timings->bt.vsync = vsync;
+ timings->bt.vbackporch = vback1;
+ timings->bt.interlaced = interlaced ? V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ timings->bt.polarities = vsync_pos ? V4L2_DV_VSYNC_POS_POL : 0;
+ timings->bt.polarities |= hsync_pos ? V4L2_DV_HSYNC_POS_POL : 0;
+
+ timings->bt.pixelclock = (u64)htot * vtot * 27000000;
+ if (interlaced) {
+ timings->bt.il_vfrontporch = vfront2;
+ timings->bt.il_vsync = timings->bt.vsync;
+ timings->bt.il_vbackporch = vback2;
+ do_div(timings->bt.pixelclock, vper * 2 /* full frame */);
+ } else {
+ timings->bt.il_vfrontporch = 0;
+ timings->bt.il_vsync = 0;
+ timings->bt.il_vbackporch = 0;
+ do_div(timings->bt.pixelclock, vper);
+ }
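+ /*
+ * Illustrative note (editorial): with the arithmetic above, a
+ * hypothetical 1080p60 source (htot = 2200, vtot = 1125,
+ * vper = 27000000 / 60 = 450000 MCLK cycles) yields
+ * bt.pixelclock = 2200 * 1125 * 60 = 148500000 Hz.
+ */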
+ v4l2_find_dv_timings_cap(timings, &tda1997x_dv_timings_cap,
+ (u32)timings->bt.pixelclock / 500, NULL, NULL);
+ v4l2_print_dv_timings(sd->name, "Detected format: ", timings, false);
+ return 0;
}
/* some sort of errata workaround for chip revision 0 (N1) */
@@ -1248,13 +1263,13 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
{
struct v4l2_subdev *sd = &state->sd;
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
u8 reg;
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,13 +1943,13 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
{
struct tda1997x_state *state = to_state(sd);
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -2450,7 +2465,8 @@ static const struct media_entity_operations tda1997x_media_ops = {
static int tda1997x_pcm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct tda1997x_state *state = snd_soc_dai_get_drvdata(dai);
+ struct v4l2_subdev *sd = snd_soc_dai_get_drvdata(dai);
+ struct tda1997x_state *state = to_state(sd);
struct snd_soc_component *component = dai->component;
struct snd_pcm_runtime *rtd = substream->runtime;
int rate, err;
@@ -2759,7 +2775,6 @@ static int tda1997x_probe(struct i2c_client *client,
dev_err(&client->dev, "register audio codec failed\n");
goto err_free_media;
}
- dev_set_drvdata(&state->client->dev, state);
v4l_info(state->client, "registered audio codec\n");
}
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index d9b3daada07d..115371ba33f0 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -117,9 +117,12 @@
#define REG_CURPAGE_00H 0xFF
#define MASK_VPER 0x3fffff
+#define MASK_VPER_SYNC_POS 0x800000
#define MASK_VHREF 0x3fff
#define MASK_HPER 0x0fff
+#define MASK_HPER_SYNC_POS 0x8000
#define MASK_HSWIDTH 0x03ff
+#define MASK_HSWIDTH_INTERLACED 0x8000
/* HPD Detection */
#define DETECT_UTIL BIT(7) /* utility of HDMI level */
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index de12f38f347c..cb660b4bfd4b 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -441,14 +441,15 @@ static void buffer_queue(struct vb2_buffer *vb)
static int video_i2c_thread_vid_cap(void *priv)
{
struct video_i2c_data *data = priv;
- unsigned int delay = mult_frac(HZ, data->frame_interval.numerator,
- data->frame_interval.denominator);
+ u32 delay = mult_frac(1000000UL, data->frame_interval.numerator,
+ data->frame_interval.denominator);
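+ /*
+ * Illustrative note (editorial): a sensor reporting a 1/10 s frame
+ * interval makes this 100000 us between frames.
+ */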
+ s64 end_us = ktime_to_us(ktime_get());
set_freezable();
do {
- unsigned long start_jiffies = jiffies;
struct video_i2c_buffer *vid_cap_buf = NULL;
+ s64 current_us;
int schedule_delay;
try_to_freeze();
@@ -475,12 +476,14 @@ static int video_i2c_thread_vid_cap(void *priv)
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
- schedule_delay = delay - (jiffies - start_jiffies);
-
- if (time_after(jiffies, start_jiffies + delay))
- schedule_delay = delay;
-
- schedule_timeout_interruptible(schedule_delay);
+ end_us += delay;
+ current_us = ktime_to_us(ktime_get());
+ if (current_us < end_us) {
+ schedule_delay = end_us - current_us;
+ usleep_range(schedule_delay * 3 / 4, schedule_delay);
+ } else {
+ end_us = current_us;
+ }
} while (!kthread_should_stop());
return 0;
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
index 4815b9dde9af..375b09612981 100644
--- a/drivers/media/mc/Kconfig
+++ b/drivers/media/mc/Kconfig
@@ -16,13 +16,5 @@ config MEDIA_CONTROLLER_REQUEST_API
bool
depends on MEDIA_CONTROLLER
help
- DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
-
This option enables the Request API for the Media controller and V4L2
interfaces. It is currently needed by a few stateless codec drivers.
-
- There is currently no intention to provide API or ABI stability for
- this new API as of yet.
-
-comment "Please notice that the enabled Media controller Request API is EXPERIMENTAL"
- depends on MEDIA_CONTROLLER_REQUEST_API
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 16af58f2f93c..74edcc76d12f 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -332,8 +332,8 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
}
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
- ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
+ ret = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret) {
cobalt_err("no suitable DMA available\n");
goto err_disable;
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index f2440eb38820..59497ba6bf1f 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -804,7 +804,7 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
CX18_ERR("Can't enable device %d!\n", cx->instance);
return -EIO;
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
CX18_ERR("No suitable DMA available, card %d\n", cx->instance);
return -EIO;
}
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 4864def20676..ce3f0141f94e 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -276,7 +276,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
s->pixelformat = fmt->fmt.pix.pixelformat;
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12) {
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16) {
s->vb_bytes_per_frame = h * 720 * 3 / 2;
s->vb_bytes_per_line = 720; /* First plane */
} else {
@@ -470,7 +470,7 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
.index = 0,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:1:1)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
},
{
.index = 1,
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2f5df471dada..013694bfcb1c 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -325,8 +325,8 @@ void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list)
- pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
- buf_size, dma);
+ dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
+ buf_size, dma);
}
int cx18_stream_alloc(struct cx18_stream *s)
@@ -385,8 +385,9 @@ int cx18_stream_alloc(struct cx18_stream *s)
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = pci_map_single(s->cx->pci_dev,
- buf->buf, s->buf_size, s->dma);
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
@@ -419,8 +420,8 @@ void cx18_stream_free(struct cx18_stream *s)
buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
list_del_init(&buf->list);
- pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
- s->buf_size, s->dma);
+ dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
+ s->buf_size, s->dma);
kfree(buf->buf);
kfree(buf);
}
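The cx18 hunks above swap the legacy pci_map_single()/pci_unmap_single() helpers for the generic DMA API on &pci_dev->dev, with the PCI_DMA_* directions becoming DMA_*. A condensed sketch of the map/sync/unmap lifecycle for a driver-allocated buffer (struct and function names are illustrative):

#include <linux/dma-mapping.h>

struct example_buf {
        void *cpu;
        dma_addr_t dma;
        size_t len;
};

static int example_map(struct device *dev, struct example_buf *b)
{
        b->dma = dma_map_single(dev, b->cpu, b->len, DMA_FROM_DEVICE);
        return dma_mapping_error(dev, b->dma) ? -ENOMEM : 0;
}

static void example_consume(struct device *dev, struct example_buf *b)
{
        /* Give the CPU a coherent view of what the device wrote ... */
        dma_sync_single_for_cpu(dev, b->dma, b->len, DMA_FROM_DEVICE);
        /* ... read b->cpu here, then hand the buffer back to the device. */
        dma_sync_single_for_device(dev, b->dma, b->len, DMA_FROM_DEVICE);
}

static void example_unmap(struct device *dev, struct example_buf *b)
{
        dma_unmap_single(dev, b->dma, b->len, DMA_FROM_DEVICE);
}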
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index c41bae118415..87ff554bb2d2 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -49,44 +49,44 @@ static struct {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE,
+ DMA_NONE,
V4L2_CAP_RADIO | V4L2_CAP_TUNER
},
};
@@ -133,7 +133,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -155,7 +155,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -287,7 +287,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s, &cx->serialize_lock);
/* Assume the previous pixel default */
- s->pixelformat = V4L2_PIX_FMT_HM12;
+ s->pixelformat = V4L2_PIX_FMT_NV12_16L16;
s->vb_bytes_per_frame = cx->cxhdl.height * 720 * 3 / 2;
s->vb_bytes_per_line = 720;
}
@@ -324,7 +324,7 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
+ if (cx18_stream_info[type].dma != DMA_NONE &&
cx->stream_buffers[type] == 0) {
CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
return 0;
@@ -733,7 +733,7 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
* Set the MDL size to the exact size needed for one frame.
* Use enough buffers per MDL to cover the MDL size
*/
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->mdl_size = 720 * s->cx->cxhdl.height * 3 / 2;
else
s->mdl_size = 720 * s->cx->cxhdl.height * 2;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ab14d35214aa..25dc8d4dc5b7 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -550,7 +550,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
if (err < 0)
- goto error;
+ goto error_msg;
chip = (struct cx23885_audio_dev *) card->private_data;
chip->dev = dev;
@@ -576,6 +576,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
error:
snd_card_free(card);
+error_msg:
pr_err("%s(): Failed to register analog audio adapter\n",
__func__);
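The cx23885 hunk above exists because snd_card_free() must not run when snd_card_new() itself failed; the failure path now jumps to a new label that only logs. The general shape of this unwinding style, with labels ordered in reverse of the setup steps (the helpers here are hypothetical):

#include <linux/device.h>

int example_alloc_card(struct device *dev);      /* hypothetical step 1 */
int example_register_card(struct device *dev);   /* hypothetical step 2 */
void example_free_card(struct device *dev);

static int example_setup(struct device *dev)
{
        int err;

        err = example_alloc_card(dev);
        if (err)
                goto err_msg;            /* nothing to undo yet */

        err = example_register_card(dev);
        if (err)
                goto err_free_card;      /* undo step 1 only */

        return 0;

err_free_card:
        example_free_card(dev);
err_msg:
        dev_err(dev, "setup failed (%d)\n", err);
        return err;
}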
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
index 03dc9924fa2c..25d0d6745b52 100644
--- a/drivers/media/pci/ddbridge/ddbridge-main.c
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -180,8 +180,8 @@ static int ddb_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
dev = vzalloc(sizeof(*dev));
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 30d29b96a339..67c467d3c81f 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -29,6 +29,7 @@ static const struct cio2_sensor_config cio2_supported_sensors[] = {
static const struct cio2_property_names prop_names = {
.clock_frequency = "clock-frequency",
.rotation = "rotation",
+ .orientation = "orientation",
.bus_type = "bus-type",
.data_lanes = "data-lanes",
.remote_endpoint = "remote-endpoint",
@@ -72,11 +73,51 @@ out_free_buff:
return ret;
}
+static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
+{
+ switch (sensor->ssdb.degree) {
+ case CIO2_SENSOR_ROTATION_NORMAL:
+ return 0;
+ case CIO2_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+ dev_warn(&sensor->adev->dev,
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ sensor->ssdb.degree);
+ return 0;
+ }
+}
+
+static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
+{
+ switch (sensor->pld->panel) {
+ case ACPI_PLD_PANEL_FRONT:
+ return V4L2_FWNODE_ORIENTATION_FRONT;
+ case ACPI_PLD_PANEL_BACK:
+ return V4L2_FWNODE_ORIENTATION_BACK;
+ case ACPI_PLD_PANEL_TOP:
+ case ACPI_PLD_PANEL_LEFT:
+ case ACPI_PLD_PANEL_RIGHT:
+ case ACPI_PLD_PANEL_UNKNOWN:
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ default:
+ dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
+ sensor->pld->panel);
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+}
+
static void cio2_bridge_create_fwnode_properties(
struct cio2_sensor *sensor,
struct cio2_bridge *bridge,
const struct cio2_sensor_config *cfg)
{
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+
+ rotation = cio2_bridge_parse_rotation(sensor);
+ orientation = cio2_bridge_parse_orientation(sensor);
+
sensor->prop_names = prop_names;
sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
@@ -85,9 +126,12 @@ static void cio2_bridge_create_fwnode_properties(
sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.clock_frequency,
sensor->ssdb.mclkspeed);
- sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
sensor->prop_names.rotation,
- sensor->ssdb.degree);
+ rotation);
+ sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.orientation,
+ orientation);
sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.bus_type,
@@ -159,6 +203,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
for (i = 0; i < bridge->n_sensors; i++) {
sensor = &bridge->sensors[i];
software_node_unregister_nodes(sensor->swnodes);
+ ACPI_FREE(sensor->pld);
acpi_dev_put(sensor->adev);
}
}
@@ -170,6 +215,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
struct fwnode_handle *fwnode;
struct cio2_sensor *sensor;
struct acpi_device *adev;
+ acpi_status status;
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
@@ -191,11 +237,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
if (ret)
goto err_put_adev;
+ status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
+ if (ACPI_FAILURE(status))
+ goto err_put_adev;
+
if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
dev_err(&adev->dev,
"Number of lanes in SSDB is invalid\n");
ret = -EINVAL;
- goto err_put_adev;
+ goto err_free_pld;
}
cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
@@ -203,7 +253,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
ret = software_node_register_nodes(sensor->swnodes);
if (ret)
- goto err_put_adev;
+ goto err_free_pld;
fwnode = software_node_fwnode(&sensor->swnodes[
SWNODE_SENSOR_HID]);
@@ -225,6 +275,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
err_free_swnodes:
software_node_unregister_nodes(sensor->swnodes);
+err_free_pld:
+ ACPI_FREE(sensor->pld);
err_put_adev:
acpi_dev_put(adev);
return ret;
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
index dd0ffcafa489..202c7d494f7a 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -12,6 +12,10 @@
#define CIO2_MAX_LANES 4
#define MAX_NUM_LINK_FREQS 3
+/* Values are educated guesses as we don't have a spec */
+#define CIO2_SENSOR_ROTATION_NORMAL 0
+#define CIO2_SENSOR_ROTATION_INVERTED 1
+
#define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \
(const struct cio2_sensor_config) { \
.hid = _HID, \
@@ -80,6 +84,7 @@ struct cio2_sensor_ssdb {
struct cio2_property_names {
char clock_frequency[16];
char rotation[9];
+ char orientation[12];
char bus_type[9];
char data_lanes[11];
char remote_endpoint[16];
@@ -106,9 +111,11 @@ struct cio2_sensor {
struct cio2_node_names node_names;
struct cio2_sensor_ssdb ssdb;
+ struct acpi_pld_info *pld;
+
struct cio2_property_names prop_names;
struct property_entry ep_properties[5];
- struct property_entry dev_properties[3];
+ struct property_entry dev_properties[4];
struct property_entry cio2_properties[3];
struct software_node_ref_args local_ref[1];
struct software_node_ref_args remote_ref[1];
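The bridge describes each sensor with static software nodes, so the new "orientation" value needs one more slot in dev_properties[] plus a matching PROPERTY_ENTRY_U32(). A minimal sketch of building such a property table (names and values are illustrative; the empty last entry terminates the array):

#include <linux/property.h>

static const struct property_entry example_sensor_props[] = {
        PROPERTY_ENTRY_U32("clock-frequency", 19200000),
        PROPERTY_ENTRY_U32("rotation", 180),
        PROPERTY_ENTRY_U32("orientation", 1),   /* placeholder orientation value */
        { }     /* terminator */
};

static const struct software_node example_sensor_node = {
        .name = "example-sensor",
        .properties = example_sensor_props,
};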
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 47db0ee0fcbf..356ea966cf8d 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -11,6 +11,7 @@
* et al.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
@@ -102,26 +103,29 @@ static inline u32 cio2_bytesperline(const unsigned int width)
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
+
if (cio2->dummy_lop) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_lop, cio2->dummy_lop_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
+ cio2->dummy_lop_bus_addr);
cio2->dummy_lop = NULL;
}
if (cio2->dummy_page) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_page, cio2->dummy_page_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
+ cio2->dummy_page_bus_addr);
cio2->dummy_page = NULL;
}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
- cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
- cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL);
if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -497,6 +501,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
+ struct device *dev = &cio2->pci_dev->dev;
void __iomem *const base = cio2->base;
unsigned int i;
u32 value;
@@ -514,8 +519,7 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
value, value & CIO2_CDMAC0_DMA_HALTED,
4000, 2000000);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "DMA %i can not be halted\n", CIO2_DMA_CHAN);
+ dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
@@ -539,8 +543,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
- dev_warn(&cio2->pci_dev->dev,
- "no ready buffers found on DMA channel %u\n",
+ dev_warn(dev, "no ready buffers found on DMA channel %u\n",
dma_chan);
return;
}
@@ -557,8 +560,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
q->bufs[q->bufs_first] = NULL;
atomic_dec(&q->bufs_queued);
- dev_dbg(&cio2->pci_dev->dev,
- "buffer %i done\n", b->vbb.vb2_buf.index);
+ dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
b->vbb.vb2_buf.timestamp = ns;
b->vbb.field = V4L2_FIELD_NONE;
@@ -612,6 +614,20 @@ static const char *const cio2_irq_errs[] = {
"non-matching Long Packet stalled",
};
+static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long csi2_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
+ dev_err(dev, "CSI-2 receiver port %i: %s\n",
+ port, cio2_irq_errs[i]);
+
+ if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
+ dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
+ csi2_status, port);
+}
+
static const char *const cio2_port_errs[] = {
"ECC recoverable",
"DPHY not recoverable",
@@ -622,10 +638,19 @@ static const char *const cio2_port_errs[] = {
"PKT2LONG",
};
+static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long port_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
+ dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
+}
+
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
- void __iomem *const base = cio2->base;
struct device *dev = &cio2->pci_dev->dev;
+ void __iomem *const base = cio2->base;
if (int_status & CIO2_INT_IOOE) {
/*
@@ -687,59 +712,32 @@ static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
/* CSI2 receiver (error) interrupt */
- u32 ie_status, ie_clear;
unsigned int port;
+ u32 ie_status;
- ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
- ie_status = ie_clear;
+ ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
for (port = 0; port < CIO2_NUM_PORTS; port++) {
u32 port_status = (ie_status >> (port * 8)) & 0xff;
- u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
- void __iomem *const csi_rx_base =
- base + CIO2_REG_PIPE_BASE(port);
- unsigned int i;
-
- while (port_status & err_mask) {
- i = ffs(port_status) - 1;
- dev_err(dev, "port %i error %s\n",
- port, cio2_port_errs[i]);
- ie_status &= ~BIT(port * 8 + i);
- port_status &= ~BIT(i);
- }
+
+ cio2_irq_log_port_errs(dev, port, port_status);
if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
- u32 csi2_status, csi2_clear;
+ void __iomem *csi_rx_base =
+ base + CIO2_REG_PIPE_BASE(port);
+ u32 csi2_status;
csi2_status = readl(csi_rx_base +
CIO2_REG_IRQCTRL_STATUS);
- csi2_clear = csi2_status;
- err_mask =
- BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
-
- while (csi2_status & err_mask) {
- i = ffs(csi2_status) - 1;
- dev_err(dev,
- "CSI-2 receiver port %i: %s\n",
- port, cio2_irq_errs[i]);
- csi2_status &= ~BIT(i);
- }
-
- writel(csi2_clear,
- csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
- if (csi2_status)
- dev_warn(dev,
- "unknown CSI2 error 0x%x on port %i\n",
- csi2_status, port);
- ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
+ cio2_irq_log_irq_errs(dev, port, csi2_status);
+
+ writel(csi2_status,
+ csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
}
}
- writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
- if (ie_status)
- dev_warn(dev, "unknown interrupt 0x%x on IE\n",
- ie_status);
+ writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
}
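The rewritten interrupt path above replaces the open-coded ffs()/mask loops with for_each_set_bit(), which visits only the bits that are set; anything beyond the known error bits is reported once as unknown. The same technique over a hypothetical status word:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/kernel.h>

static const char *const example_errs[] = {
        "overflow", "underrun", "crc error",
};

static void example_log_errs(struct device *dev, u32 status)
{
        unsigned long bits = status;    /* for_each_set_bit() needs unsigned long */
        unsigned int i;

        for_each_set_bit(i, &bits, ARRAY_SIZE(example_errs))
                dev_err(dev, "error: %s\n", example_errs[i]);

        /* Bits above the table are unexpected hardware state. */
        if (fls_long(bits) > ARRAY_SIZE(example_errs))
                dev_warn(dev, "unknown status bits 0x%lx\n", bits);
}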
@@ -795,16 +793,21 @@ static int cio2_vb2_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
unsigned int i;
- *num_planes = q->format.num_planes;
+ if (*num_planes && *num_planes < q->format.num_planes)
+ return -EINVAL;
- for (i = 0; i < *num_planes; ++i) {
+ for (i = 0; i < q->format.num_planes; ++i) {
+ if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
+ return -EINVAL;
sizes[i] = q->format.plane_fmt[i].sizeimage;
- alloc_devs[i] = &cio2->pci_dev->dev;
+ alloc_devs[i] = dev;
}
+ *num_planes = q->format.num_planes;
*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
/* Initialize buffer queue */
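The queue_setup change above brings the driver in line with the vb2 contract for VIDIOC_CREATE_BUFS: when *num_planes is already non-zero the caller supplied the plane layout, and the driver must only check that the requested sizes are big enough rather than overwrite them. A sketch of that contract for a single-plane queue (the image size is a placeholder):

#include <media/videobuf2-v4l2.h>

static int example_queue_setup(struct vb2_queue *vq,
                               unsigned int *num_buffers,
                               unsigned int *num_planes,
                               unsigned int sizes[],
                               struct device *alloc_devs[])
{
        const unsigned int sizeimage = 1920 * 1080 * 2; /* placeholder */

        if (*num_planes)        /* CREATE_BUFS: validate only */
                return sizes[0] < sizeimage ? -EINVAL : 0;

        *num_planes = 1;        /* REQBUFS: report our requirements */
        sizes[0] = sizeimage;
        return 0;
}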
@@ -824,8 +827,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int pages = PFN_UP(vb->planes[0].length);
unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
struct sg_table *sg;
@@ -879,17 +881,17 @@ fail:
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q =
container_of(vb->vb2_queue, struct cio2_queue, vbq);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
struct cio2_fbpt_entry *entry;
unsigned long flags;
unsigned int i, j, next = q->bufs_next;
int bufs_queued = atomic_inc_return(&q->bufs_queued);
u32 fbpt_rp;
- dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
+ dev_dbg(dev, "queue buffer %d\n", vb->index);
/*
* This code queues the buffer to the CIO2 DMA engine, which starts
@@ -940,12 +942,12 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
return;
}
- dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
+ dev_dbg(dev, "entry %i was full!\n", next);
next = (next + 1) % CIO2_MAX_BUFFERS;
}
local_irq_restore(flags);
- dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
+ dev_err(dev, "error: all cio2 entries were full!\n");
atomic_dec(&q->bufs_queued);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
@@ -954,14 +956,14 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int i;
/* Free LOP table */
for (i = 0; i < CIO2_MAX_LOPS; i++) {
if (b->lop[i])
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ dma_free_coherent(dev, PAGE_SIZE,
b->lop[i], b->lop_bus_addr[i]);
}
}
@@ -970,14 +972,15 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
int r;
cio2->cur_queue = q;
atomic_set(&q->frame_sequence, 0);
- r = pm_runtime_resume_and_get(&cio2->pci_dev->dev);
+ r = pm_runtime_resume_and_get(dev);
if (r < 0) {
- dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
+ dev_info(dev, "failed to set power %d\n", r);
return r;
}
@@ -1003,9 +1006,9 @@ fail_csi2_subdev:
fail_hw:
media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
- dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
+ dev_dbg(dev, "failed to start streaming (%d)\n", r);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
return r;
}
@@ -1014,16 +1017,16 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
- dev_err(&cio2->pci_dev->dev,
- "failed to stop sensor streaming\n");
+ dev_err(dev, "failed to stop sensor streaming\n");
cio2_hw_exit(cio2, q);
synchronize_irq(cio2->pci_dev->irq);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
media_pipeline_stop(&q->vdev.entity);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
cio2->streaming = false;
}
@@ -1311,16 +1314,16 @@ static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
static int cio2_video_link_validate(struct media_link *link)
{
- struct video_device *vd = container_of(link->sink->entity,
- struct video_device, entity);
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vd = media_entity_to_video_device(entity);
struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
struct cio2_device *cio2 = video_get_drvdata(vd);
+ struct device *dev = &cio2->pci_dev->dev;
struct v4l2_subdev_format source_fmt;
int ret;
- if (!media_entity_remote_pad(link->sink->entity->pads)) {
- dev_info(&cio2->pci_dev->dev,
- "video node %s pad not connected\n", vd->name);
+ if (!media_entity_remote_pad(entity->pads)) {
+ dev_info(dev, "video node %s pad not connected\n", vd->name);
return -ENOTCONN;
}
@@ -1330,8 +1333,7 @@ static int cio2_video_link_validate(struct media_link *link)
if (source_fmt.format.width != q->format.width ||
source_fmt.format.height != q->format.height) {
- dev_err(&cio2->pci_dev->dev,
- "Wrong width or height %ux%u (%ux%u expected)\n",
+ dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
q->format.width, q->format.height,
source_fmt.format.width, source_fmt.format.height);
return -EINVAL;
@@ -1371,15 +1373,15 @@ struct sensor_async_subdev {
struct csi2_bus_info csi2;
};
+#define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
+
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
struct cio2_queue *q;
if (cio2->queue[s_asd->csi2.port].sensor)
@@ -1399,10 +1401,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
cio2->queue[s_asd->csi2.port].sensor = NULL;
}
@@ -1410,8 +1410,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
- struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
- notifier);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
struct v4l2_async_subdev *asd;
struct cio2_queue *q;
@@ -1419,7 +1419,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
int ret;
list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
- s_asd = container_of(asd, struct sensor_async_subdev, asd);
+ s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
@@ -1428,8 +1428,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
break;
if (pad == q->sensor->entity.num_pads) {
- dev_err(&cio2->pci_dev->dev,
- "failed to find src pad for %s\n",
+ dev_err(dev, "failed to find src pad for %s\n",
q->sensor->name);
return -ENXIO;
}
@@ -1439,8 +1438,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
&q->subdev.entity, CIO2_PAD_SINK,
0);
if (ret) {
- dev_err(&cio2->pci_dev->dev,
- "failed to create link for %s\n",
+ dev_err(dev, "failed to create link for %s\n",
q->sensor->name);
return ret;
}
@@ -1457,6 +1455,7 @@ static const struct v4l2_async_notifier_operations cio2_async_ops = {
static int cio2_parse_firmware(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
int ret;
@@ -1467,10 +1466,8 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
struct sensor_async_subdev *s_asd;
struct fwnode_handle *ep;
- ep = fwnode_graph_get_endpoint_by_id(
- dev_fwnode(&cio2->pci_dev->dev), i, 0,
- FWNODE_GRAPH_ENDPOINT_NEXT);
-
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
continue;
@@ -1478,8 +1475,9 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
if (ret)
goto err_parse;
- s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &cio2->notifier, ep, struct sensor_async_subdev);
+ s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
+ struct
+ sensor_async_subdev);
if (IS_ERR(s_asd)) {
ret = PTR_ERR(s_asd);
goto err_parse;
@@ -1502,10 +1500,9 @@ err_parse:
* suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
- ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+ ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "failed to register async notifier : %d\n", ret);
+ dev_err(dev, "failed to register async notifier : %d\n", ret);
return ret;
}
@@ -1524,7 +1521,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
static const u32 default_width = 1936;
static const u32 default_height = 1096;
const struct ipu3_cio2_fmt dflt_fmt = formats[0];
-
+ struct device *dev = &cio2->pci_dev->dev;
struct video_device *vdev = &q->vdev;
struct vb2_queue *vbq = &q->vbq;
struct v4l2_subdev *subdev = &q->subdev;
@@ -1566,8 +1563,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
subdev->internal_ops = &cio2_subdev_internal_ops;
r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
goto fail_subdev_media_entity;
}
@@ -1575,8 +1571,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vdev->entity.ops = &cio2_video_entity_ops;
r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize videodev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize videodev media entity (%d)\n",
+ r);
goto fail_vdev_media_entity;
}
@@ -1590,8 +1586,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_set_subdevdata(subdev, cio2);
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev (%d)\n", r);
+ dev_err(dev, "failed initialize subdev (%d)\n", r);
goto fail_subdev;
}
@@ -1607,8 +1602,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vbq->lock = &q->lock;
r = vb2_queue_init(vbq);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to initialize videobuf2 queue (%d)\n", r);
+ dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
goto fail_subdev;
}
@@ -1625,8 +1619,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
video_set_drvdata(vdev, cio2);
r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to register video device (%d)\n", r);
+ dev_err(dev, "failed to register video device (%d)\n", r);
goto fail_vdev;
}
@@ -1648,7 +1641,7 @@ fail_subdev:
fail_vdev_media_entity:
media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
- cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ cio2_fbpt_exit(q, dev);
fail_fbpt:
mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
@@ -1715,11 +1708,12 @@ static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
- struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
+ struct device *dev = &pci_dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct cio2_device *cio2;
int r;
- cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
+ cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
if (!cio2)
return -ENOMEM;
cio2->pci_dev = pci_dev;
@@ -1732,7 +1726,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = cio2_check_fwnode_graph(fwnode);
if (r) {
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
- dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
+ dev_err(dev, "fwnode graph has no endpoints connected\n");
return -EINVAL;
}
@@ -1743,16 +1737,16 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = pcim_enable_device(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+ dev_err(dev, "failed to enable device (%d)\n", r);
return r;
}
- dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+ dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
if (r) {
- dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
+ dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
@@ -1762,15 +1756,15 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
+ r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
if (r) {
- dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+ dev_err(dev, "failed to set DMA mask (%d)\n", r);
return -ENODEV;
}
r = pci_enable_msi(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+ dev_err(dev, "failed to enable MSI (%d)\n", r);
return r;
}
@@ -1780,7 +1774,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
mutex_init(&cio2->lock);
- cio2->media_dev.dev = &cio2->pci_dev->dev;
+ cio2->media_dev.dev = dev;
strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
sizeof(cio2->media_dev.model));
snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
@@ -1793,10 +1787,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
goto fail_mutex_destroy;
cio2->v4l2_dev.mdev = &cio2->media_dev;
- r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
+ r = v4l2_device_register(dev, &cio2->v4l2_dev);
if (r) {
- dev_err(&pci_dev->dev,
- "failed to register V4L2 device (%d)\n", r);
+ dev_err(dev, "failed to register V4L2 device (%d)\n", r);
goto fail_media_device_unregister;
}
@@ -1804,28 +1797,28 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
if (r)
goto fail_v4l2_device_unregister;
- v4l2_async_notifier_init(&cio2->notifier);
+ v4l2_async_nf_init(&cio2->notifier);
/* Register notifier for subdevices we care */
r = cio2_parse_firmware(cio2);
if (r)
goto fail_clean_notifier;
- r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
- IRQF_SHARED, CIO2_NAME, cio2);
+ r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ CIO2_NAME, cio2);
if (r) {
- dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
+ dev_err(dev, "failed to request IRQ (%d)\n", r);
goto fail_clean_notifier;
}
- pm_runtime_put_noidle(&pci_dev->dev);
- pm_runtime_allow(&pci_dev->dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
return 0;
fail_clean_notifier:
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1844,8 +1837,8 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
media_device_unregister(&cio2->media_dev);
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -2005,10 +1998,9 @@ static int __maybe_unused cio2_resume(struct device *dev)
if (!cio2->streaming)
return 0;
/* Start stream */
- r = pm_runtime_force_resume(&cio2->pci_dev->dev);
+ r = pm_runtime_force_resume(dev);
if (r < 0) {
- dev_err(&cio2->pci_dev->dev,
- "failed to set power %d\n", r);
+ dev_err(dev, "failed to set power %d\n", r);
return r;
}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 3806d7f04d69..3a1f394e05aa 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -338,6 +338,8 @@ struct cio2_buffer {
unsigned int offset;
};
+#define to_cio2_buffer(vb) container_of(vb, struct cio2_buffer, vbb.vb2_buf)
+
struct csi2_bus_info {
u32 port;
u32 lanes;
@@ -399,6 +401,8 @@ struct cio2_device {
dma_addr_t dummy_lop_bus_addr;
};
+#define to_cio2_device(n) container_of(n, struct cio2_device, notifier)
+
/**************** Virtual channel ****************/
/*
* This should come from sensor driver. No
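The two new container_of() wrappers above (to_cio2_buffer(), to_cio2_device()) let the .c file drop its repeated multi-line casts. The pattern, sketched for a made-up type with an embedded member:

#include <linux/kernel.h>
#include <linux/list.h>

struct example_outer {
        int config;
        struct list_head node;  /* embedded member handed to other code */
};

#define to_example_outer(n) container_of(n, struct example_outer, node)

static int example_get_config(struct list_head *n)
{
        /* Recover the enclosing object from a pointer to its member. */
        return to_example_outer(n)->config;
}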
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8ebc97ebf1a2..57d4d5485d7a 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -837,7 +837,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
IVTV_ERR("Can't enable device!\n");
return -EIO;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
IVTV_ERR("No suitable DMA available.\n");
return -EIO;
}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index da19b2e95e6c..0cdf6b3210c2 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -339,7 +339,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
/* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
pixfmt->bytesperline = 720;
@@ -417,7 +417,7 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
pixfmt->field = V4L2_FIELD_ANY;
break;
}
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
pixfmt->bytesperline = 720;
pixfmt->width = itv->yuv_info.v4l2_src_w;
pixfmt->height = itv->yuv_info.v4l2_src_h;
@@ -917,7 +917,7 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
@@ -944,7 +944,7 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index 7ac4615e92ea..f9b192ab7e7c 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -188,7 +188,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return 0;
IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
- s->dma != PCI_DMA_NONE ? "DMA " : "",
+ s->dma != DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
@@ -218,8 +218,9 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
- s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
ivtv_stream_sync_for_cpu(s);
}
@@ -237,7 +238,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
}
INIT_LIST_HEAD(&buf->list);
if (ivtv_might_use_dma(s)) {
- buf->dma_handle = pci_map_single(s->itv->pdev,
+ buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
buf->buf, s->buf_size + 256, s->dma);
ivtv_buf_sync_for_cpu(s, buf);
}
@@ -260,8 +261,8 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* empty q_free */
while ((buf = ivtv_dequeue(s, &s->q_free))) {
if (ivtv_might_use_dma(s))
- pci_unmap_single(s->itv->pdev, buf->dma_handle,
- s->buf_size + 256, s->dma);
+ dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
+ s->buf_size + 256, s->dma);
kfree(buf->buf);
kfree(buf);
}
@@ -269,8 +270,9 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* Free SG Array/Lists */
if (s->sg_dma != NULL) {
if (s->sg_handle != IVTV_DMA_UNMAPPED) {
- pci_unmap_single(s->itv->pdev, s->sg_handle,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
s->sg_handle = IVTV_DMA_UNMAPPED;
}
kfree(s->sg_pending);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f04ee84bab5f..6e455948cc77 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -100,7 +100,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -108,7 +108,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -116,7 +116,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -124,42 +124,42 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_RADIO | V4L2_CAP_TUNER,
&ivtv_v4l2_radio_fops
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
"decoder VBI",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VOUT */
"decoder VOUT",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
}
@@ -179,7 +179,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
- s->dma = PCI_DMA_NONE;
+ s->dma = DMA_NONE;
else
s->dma = ivtv_stream_info[type].dma;
s->buf_size = itv->stream_buf_size[type];
@@ -217,7 +217,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
+ if (ivtv_stream_info[type].dma != DMA_NONE &&
itv->options.kilobytes[type] == 0) {
IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
return 0;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 0d8372cc364a..210be8290f24 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -81,8 +81,10 @@ void ivtv_udma_alloc(struct ivtv *itv)
{
if (itv->udma.SG_handle == 0) {
/* Map DMA Page Array Buffer */
- itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
+ itv->udma.SGarray,
+ sizeof(itv->udma.SGarray),
+ DMA_TO_DEVICE);
ivtv_udma_sync_for_cpu(itv);
}
}
@@ -135,7 +137,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Map SG List */
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
@@ -159,7 +162,8 @@ void ivtv_udma_unmap(struct ivtv *itv)
/* Unmap Scatterlist */
if (dma->SG_length) {
- pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
+ DMA_TO_DEVICE);
dma->SG_length = 0;
}
/* sync DMA */
@@ -175,13 +179,14 @@ void ivtv_udma_free(struct ivtv *itv)
/* Unmap SG Array */
if (itv->udma.SG_handle) {
- pci_unmap_single(itv->pdev, itv->udma.SG_handle,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
+ sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
/* Unmap Scatterlist */
if (itv->udma.SG_length) {
- pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
+ itv->udma.page_count, DMA_TO_DEVICE);
}
for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 5f7dc9771f8d..e79e8a5a744a 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -113,7 +113,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
dma->page_count = 0;
return -ENOMEM;
}
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
@@ -920,7 +921,9 @@ static void ivtv_yuv_init(struct ivtv *itv)
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN);
if (yi->blanking_ptr) {
- yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
+ yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
+ yi->blanking_ptr,
+ 720 * 16, DMA_TO_DEVICE);
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
@@ -1264,7 +1267,8 @@ void ivtv_yuv_close(struct ivtv *itv)
if (yi->blanking_ptr) {
kfree(yi->blanking_ptr);
yi->blanking_ptr = NULL;
- pci_unmap_single(itv->pdev, yi->blanking_dmaptr, 720*16, PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, yi->blanking_dmaptr,
+ 720 * 16, DMA_TO_DEVICE);
}
/* Invalidate the old dimension information */
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index e2d56dca5be4..2c43ebf83966 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -36,7 +36,7 @@
#include <linux/fb.h>
#include <linux/ivtvfb.h>
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
#include <asm/memtype.h>
#endif
@@ -48,8 +48,8 @@ static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
-static int osd_yres;
-static int osd_xres;
+static unsigned int osd_yres;
+static unsigned int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug,ivtvfb_debug, int, 0644);
@@ -58,8 +58,8 @@ module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
-module_param(osd_yres, int, 0444);
-module_param(osd_xres, int, 0444);
+module_param(osd_yres, uint, 0444);
+module_param(osd_xres, uint, 0444);
MODULE_PARM_DESC(ivtvfb_card_id,
"Only use framebuffer of the specified ivtv card (0-31)\n"
@@ -1157,7 +1157,7 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (pat_enabled()) {
if (ivtvfb_force_pat) {
pr_info("PAT is enabled. Write-combined framebuffer caching will be disabled.\n");
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 6f3125c2d097..8287851b5ffd 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -258,19 +258,24 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
/* IRQ is being signaled */
reg_isr = readw(ndev->bmmio0 + REG_ISR);
- if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
- iret = netup_i2c_interrupt(&ndev->i2c[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
- iret = netup_i2c_interrupt(&ndev->i2c[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
iret = netup_spi_interrupt(ndev->spi);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
- iret = netup_dma_interrupt(&ndev->dma[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
- iret = netup_dma_interrupt(&ndev->dma[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
- iret = netup_ci_interrupt(ndev);
+ else if (!ndev->old_fw) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
+ iret = netup_i2c_interrupt(&ndev->i2c[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
+ iret = netup_i2c_interrupt(&ndev->i2c[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
+ iret = netup_dma_interrupt(&ndev->dma[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
+ iret = netup_dma_interrupt(&ndev->dma[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
+ iret = netup_ci_interrupt(ndev);
+ } else {
+ goto err;
+ }
} else {
+err:
dev_err(&pci_dev->dev,
"%s(): unknown interrupt 0x%x\n",
__func__, reg_isr);
@@ -841,7 +846,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
"%s(): board vendor 0x%x, revision 0x%x\n",
__func__, board_vendor, board_revision);
pci_set_master(pci_dev);
- if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
+ if (dma_set_mask(&pci_dev->dev, 0xffffffff) < 0) {
dev_err(&pci_dev->dev,
"%s(): 32bit PCI DMA is not supported\n", __func__);
goto pci_detect_err;
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index f1f4793a4452..6ac9b9bd7435 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -228,16 +228,16 @@ static void pluto_set_dma_addr(struct pluto *pluto)
static int pluto_dma_map(struct pluto *pluto)
{
- pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ pluto->dma_addr = dma_map_single(&pluto->pdev->dev, pluto->dma_buf,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
- return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
+ return dma_mapping_error(&pluto->pdev->dev, pluto->dma_addr);
}
static void pluto_dma_unmap(struct pluto *pluto)
{
- pci_unmap_single(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pluto->pdev->dev, pluto->dma_addr, TS_DMA_BYTES,
+ DMA_FROM_DEVICE);
}
static int pluto_start_feed(struct dvb_demux_feed *f)
@@ -276,8 +276,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
{
/* synchronize the DMA transfer with the CPU
* first so that we see updated contents. */
- pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
/* Workaround for broken hardware:
* [1] On startup NBPACKETS seems to contain an uninitialized value,
@@ -310,8 +310,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
pluto_set_dma_addr(pluto);
/* sync the buffer and give it back to the card */
- pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
}
static irqreturn_t pluto_irq(int irq, void *dev_id)
@@ -595,7 +595,7 @@ static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable interrupts */
pci_write_config_dword(pdev, 0x6c, 0x8000);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index f2aa36814fba..121a4a92ea10 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1340,7 +1340,7 @@ static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index ce449c941171..0d82a4b27d5b 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5765,6 +5765,33 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200000,
},
},
+ [SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H] = {
+ .name = "Leadtek Winfast HDTV200 H",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .gpiomask = 0x00200700,
+ .inputs = { {
+ .type = SAA7134_INPUT_TV,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x00000300,
+ }, {
+ .type = SAA7134_INPUT_COMPOSITE,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ }, {
+ .type = SAA7134_INPUT_SVIDEO,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ } },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -7041,6 +7068,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x13cf,
.driver_data = SAA7134_BOARD_SNAZIO_TVPVR_PRO,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x107d,
+ .subdevice = 0x6f2e,
+ .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7245,6 +7278,22 @@ static int saa7134_kworld_pc150u_toggle_agc(struct saa7134_dev *dev,
return 0;
}
+static int saa7134_leadtek_hdtv200h_toggle_agc(struct saa7134_dev *dev,
+ enum tda18271_mode mode)
+{
+ switch (mode) {
+ case TDA18271_ANALOG:
+ saa7134_set_gpio(dev, 10, 0);
+ break;
+ case TDA18271_DIGITAL:
+ saa7134_set_gpio(dev, 10, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
int command, int arg)
{
@@ -7264,6 +7313,9 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PC150U:
ret = saa7134_kworld_pc150u_toggle_agc(dev, arg);
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ ret = saa7134_leadtek_hdtv200h_toggle_agc(dev, arg);
+ break;
default:
break;
}
@@ -7287,6 +7339,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
case SAA7134_BOARD_KWORLD_PC150U:
case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
/* tda8290 + tda18271 */
ret = saa7134_tda8290_18271_callback(dev, command, arg);
break;
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index f359cd5c006a..d17a1b15faee 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1189,6 +1189,22 @@ static struct s5h1411_config kworld_s5h1411_config = {
S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
};
+static struct tda18271_config hdtv200h_tda18271_config = {
+ .gate = TDA18271_GATE_ANALOG,
+ .config = 3 /* Use tuner callback for AGC */
+};
+
+static struct s5h1411_config hdtv200h_s5h1411_config = {
+ .output_mode = S5H1411_PARALLEL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .qam_if = S5H1411_IF_4000,
+ .vsb_if = S5H1411_IF_3250,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing =
+ S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
+};
+
/* ==================================================================
* Core code
@@ -1854,6 +1870,19 @@ static int dvb_init(struct saa7134_dev *dev)
__func__);
}
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ fe0->dvb.frontend = dvb_attach(s5h1411_attach,
+ &hdtv200h_s5h1411_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_adap,
+ &hdtv200h_tda18271_config);
+ }
+ break;
default:
pr_warn("Huh? unknown DVB card?\n");
break;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index d29499cd7370..49fe0f6bacba 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -328,6 +328,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_AVERMEDIA_505 194
#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
#define SAA7134_BOARD_SNAZIO_TVPVR_PRO 196
+#define SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H 197
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
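Wiring up the new Leadtek board touches two tables: a board descriptor indexed by the new SAA7134_BOARD_* constant, and a pci_device_id entry that matches the card's PCI subsystem IDs and carries that constant in driver_data. A sketch of such a table entry (the constant and IDs mirror the hunks above; other drivers would use their own values):

#include <linux/pci.h>

#define EXAMPLE_BOARD_ID 197    /* index into the driver's board array */

static const struct pci_device_id example_pci_tbl[] = {
        {
                .vendor      = PCI_VENDOR_ID_PHILIPS,
                .device      = PCI_DEVICE_ID_PHILIPS_SAA7133,
                .subvendor   = 0x107d,
                .subdevice   = 0x6f2e,
                .driver_data = EXAMPLE_BOARD_ID,
        },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);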
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index 4ddd0f5b50f1..5526bcc7a9bd 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -1057,8 +1057,6 @@ static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
dprintk(DBGLVL_API, " numformats = 0x%x\n",
vcoutputtermhdr->numformats);
- t = (struct tmComResDescrHeader *)
- ((struct tmComResDMATermDescrHeader *)(buf + idx));
next_offset = idx + (vcoutputtermhdr->len);
for (i = 0; i < vcoutputtermhdr->numformats; i++) {
t = (struct tmComResDescrHeader *)
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 282f7dfb7aaf..23d3cae54a5d 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -262,7 +262,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
goto disable_pci;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 80321e03809a..cf4adc64c953 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -200,6 +200,22 @@ config VIDEO_TI_CAL_MC
endif # VIDEO_TI_CAL
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on VIDEO_V4L2 && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car Image Signal Processor (ISP).
+ Enable this to support the Renesas R-Car Image Signal
+ Processor (ISP).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
+
endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
@@ -314,6 +330,9 @@ config VIDEO_MEDIATEK_VCODEC
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
+ select V4L2_H264
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats on MT8173
@@ -635,6 +654,7 @@ config VIDEO_RCAR_DRIF
depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_VMALLOC
+ select V4L2_ASYNC
help
Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is Digital
Radio Interface that interfaces with an RF front end chip. It is a
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 73ce083c2fc6..a148553babfc 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index 887b492e4ad1..c8156da33043 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -6,16 +6,20 @@
*/
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -101,6 +105,12 @@
#define BETA_OFFSET_DIV_2 -1
#define TC_OFFSET_DIV_2 -1
+/*
+ * This control allows applications to explicitly disable the encoder buffer.
+ * This value is Allegro specific.
+ */
+#define V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0)
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
@@ -125,6 +135,13 @@ struct allegro_mbox {
struct mutex lock;
};
+struct allegro_encoder_buffer {
+ unsigned int size;
+ unsigned int color_depth;
+ unsigned int num_cores;
+ unsigned int clk_rate;
+};
+
struct allegro_dev {
struct v4l2_device v4l2_dev;
struct video_device video_dev;
@@ -136,12 +153,19 @@ struct allegro_dev {
struct regmap *regmap;
struct regmap *sram;
+ struct regmap *settings;
+
+ struct clk *clk_core;
+ struct clk *clk_mcu;
const struct fw_info *fw_info;
struct allegro_buffer firmware;
struct allegro_buffer suballocator;
+ bool has_encoder_buffer;
+ struct allegro_encoder_buffer encoder_buffer;
struct completion init_complete;
+ bool initialized;
/* The mailbox interface */
struct allegro_mbox *mbox_command;
@@ -257,6 +281,8 @@ struct allegro_channel {
struct v4l2_ctrl *mpeg_video_cpb_size;
struct v4l2_ctrl *mpeg_video_gop_size;
+ struct v4l2_ctrl *encoder_buffer;
+
/* user_id is used to identify the channel during CREATE_CHANNEL */
/* not sure, what to set here and if this is actually required */
int user_id;
@@ -921,6 +947,52 @@ out:
kfree(msg);
}
+static int allegro_encoder_buffer_init(struct allegro_dev *dev,
+ struct allegro_encoder_buffer *buffer)
+{
+ int err;
+ struct regmap *settings = dev->settings;
+ unsigned int supports_10_bit;
+ unsigned int memory_depth;
+ unsigned int num_cores;
+ unsigned int color_depth;
+ unsigned long clk_rate;
+
+ /* The encoder buffer is not supported before firmware version 2019.2 */
+ if (dev->fw_info->mailbox_version < MCU_MSG_VERSION_2019_2)
+ return -ENODEV;
+
+ if (!settings)
+ return -EINVAL;
+
+ err = regmap_read(settings, VCU_ENC_COLOR_DEPTH, &supports_10_bit);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_MEMORY_DEPTH, &memory_depth);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_NUM_CORE, &num_cores);
+ if (err < 0)
+ return err;
+
+ clk_rate = clk_get_rate(dev->clk_core);
+ if (clk_rate == 0)
+ return -EINVAL;
+
+ color_depth = supports_10_bit ? 10 : 8;
+ /* The firmware expects the encoder buffer size in bits. */
+ buffer->size = color_depth * 32 * memory_depth;
+ buffer->color_depth = color_depth;
+ buffer->num_cores = num_cores;
+ buffer->clk_rate = clk_rate;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "using %d bits encoder buffer with %d-bit color depth\n",
+ buffer->size, color_depth);
+
+ return 0;
+}
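A rough worked example of the sizing computed above (the register values are illustrative, not taken from real hardware): if VCU_ENC_COLOR_DEPTH reads non-zero and VCU_MEMORY_DEPTH reads 512, the helper reports color_depth = 10 and buffer->size = 10 * 32 * 512 = 163840 bits, which is the value later handed to the firmware in allegro_mcu_send_init().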
+
static void allegro_mcu_send_init(struct allegro_dev *dev,
dma_addr_t suballoc_dma, size_t suballoc_size)
{
@@ -934,10 +1006,17 @@ static void allegro_mcu_send_init(struct allegro_dev *dev,
msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma);
msg.suballoc_size = to_mcu_size(dev, suballoc_size);
- /* disable L2 cache */
- msg.l2_cache[0] = -1;
- msg.l2_cache[1] = -1;
- msg.l2_cache[2] = -1;
+ if (dev->has_encoder_buffer) {
+ msg.encoder_buffer_size = dev->encoder_buffer.size;
+ msg.encoder_buffer_color_depth = dev->encoder_buffer.color_depth;
+ msg.num_cores = dev->encoder_buffer.num_cores;
+ msg.clk_rate = dev->encoder_buffer.clk_rate;
+ } else {
+ msg.encoder_buffer_size = -1;
+ msg.encoder_buffer_color_depth = -1;
+ msg.num_cores = -1;
+ msg.clk_rate = -1;
+ }
allegro_mbox_send(dev->mbox_command, &msg);
}
@@ -1184,9 +1263,8 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->max_transfo_depth_intra = channel->max_transfo_depth_intra;
param->max_transfo_depth_inter = channel->max_transfo_depth_inter;
- param->prefetch_auto = 0;
- param->prefetch_mem_offset = 0;
- param->prefetch_mem_size = 0;
+ param->encoder_buffer_enabled = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
+ param->encoder_buffer_offset = 0;
param->rate_control_mode = channel->frame_rc_enable ?
v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;
@@ -1311,6 +1389,7 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
u64 src_handle)
{
struct mcu_msg_encode_frame msg;
+ bool use_encoder_buffer = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
memset(&msg, 0, sizeof(msg));
@@ -1319,6 +1398,8 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.channel_id = channel->mcu_channel_id;
msg.encoding_options = AL_OPT_FORCE_LOAD;
+ if (use_encoder_buffer)
+ msg.encoding_options |= AL_OPT_USE_L2;
msg.pps_qp = 26; /* qp are relative to 26 */
msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
/* src_handle is copied to mcu_msg_encode_frame_response */
@@ -1326,8 +1407,6 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.src_y = to_codec_addr(dev, src_y);
msg.src_uv = to_codec_addr(dev, src_uv);
msg.stride = channel->stride;
- msg.ep2 = 0x0;
- msg.ep2_v = to_mcu_addr(dev, msg.ep2);
allegro_mbox_send(dev->mbox_command, &msg);
@@ -1509,14 +1588,14 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);
- sps->profile_idc = nal_h264_profile_from_v4l2(profile);
+ sps->profile_idc = nal_h264_profile(profile);
sps->constraint_set0_flag = 0;
sps->constraint_set1_flag = 1;
sps->constraint_set2_flag = 0;
sps->constraint_set3_flag = 0;
sps->constraint_set4_flag = 0;
sps->constraint_set5_flag = 0;
- sps->level_idc = nal_h264_level_from_v4l2(level);
+ sps->level_idc = nal_h264_level(level);
sps->seq_parameter_set_id = 0;
sps->log2_max_frame_num_minus4 = LOG2_MAX_FRAME_NUM - 4;
sps->pic_order_cnt_type = 0;
@@ -1541,13 +1620,17 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui_parameters_present_flag = 1;
sps->vui.aspect_ratio_info_present_flag = 0;
sps->vui.overscan_info_present_flag = 0;
+
sps->vui.video_signal_type_present_flag = 1;
- sps->vui.video_format = 1;
- sps->vui.video_full_range_flag = 0;
+ sps->vui.video_format = 5; /* unspecified */
+ sps->vui.video_full_range_flag = nal_h264_full_range(channel->quantization);
sps->vui.colour_description_present_flag = 1;
- sps->vui.colour_primaries = 5;
- sps->vui.transfer_characteristics = 5;
- sps->vui.matrix_coefficients = 5;
+ sps->vui.colour_primaries = nal_h264_color_primaries(channel->colorspace);
+ sps->vui.transfer_characteristics =
+ nal_h264_transfer_characteristics(channel->colorspace, channel->xfer_func);
+ sps->vui.matrix_coefficients =
+ nal_h264_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
sps->vui.chroma_loc_info_present_flag = 1;
sps->vui.chroma_sample_loc_type_top_field = 0;
sps->vui.chroma_sample_loc_type_bottom_field = 0;
@@ -1560,8 +1643,9 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui.nal_hrd_parameters_present_flag = 0;
sps->vui.vcl_hrd_parameters_present_flag = 1;
sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
- sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
+ sps->vui.vcl_hrd_parameters.bit_rate_scale =
+ ffs(channel->bitrate_peak) - 6;
sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
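A hedged worked example of the ffs()-based scaling above (illustrative numbers): for channel->bitrate_peak = 4000000 bit/s the lowest set bit is bit 9, so bit_rate_scale = 9 - 6 = 3 and bit_rate_value_minus1[0] = 4000000 / 2^(6+3) - 1 = 7811; per the E-53 relation cited above this signals a peak rate of (7811 + 1) * 2^(6+3) = 3999744 bit/s. The HEVC HRD fields added further down use the same scheme.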
@@ -1654,12 +1738,12 @@ static ssize_t allegro_hevc_write_vps(struct allegro_channel *channel,
vps->temporal_id_nesting_flag = 1;
ptl = &vps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
vps->sub_layer_ordering_info_present_flag = 0;
vps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
@@ -1678,7 +1762,10 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
struct allegro_dev *dev = channel->dev;
struct nal_hevc_sps *sps;
struct nal_hevc_profile_tier_level *ptl;
+ struct nal_hevc_vui_parameters *vui;
+ struct nal_hevc_hrd_parameters *hrd;
ssize_t size;
+ unsigned int cpb_size;
unsigned int num_ref_frames = channel->num_ref_idx_l0;
s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
@@ -1691,12 +1778,12 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->temporal_id_nesting_flag = 1;
ptl = &sps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
sps->seq_parameter_set_id = 0;
sps->chroma_format_idc = 1; /* Only 4:2:0 sampling supported */
@@ -1731,6 +1818,50 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->sps_temporal_mvp_enabled_flag = channel->temporal_mvp_enable;
sps->strong_intra_smoothing_enabled_flag = channel->max_cu_size > 4;
+ sps->vui_parameters_present_flag = 1;
+ vui = &sps->vui;
+
+ vui->video_signal_type_present_flag = 1;
+ vui->video_format = 5; /* unspecified */
+ vui->video_full_range_flag = nal_hevc_full_range(channel->quantization);
+ vui->colour_description_present_flag = 1;
+ vui->colour_primaries = nal_hevc_color_primaries(channel->colorspace);
+ vui->transfer_characteristics = nal_hevc_transfer_characteristics(channel->colorspace,
+ channel->xfer_func);
+ vui->matrix_coeffs = nal_hevc_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
+ vui->chroma_loc_info_present_flag = 1;
+ vui->chroma_sample_loc_type_top_field = 0;
+ vui->chroma_sample_loc_type_bottom_field = 0;
+
+ vui->vui_timing_info_present_flag = 1;
+ vui->vui_num_units_in_tick = channel->framerate.denominator;
+ vui->vui_time_scale = channel->framerate.numerator;
+
+ vui->bitstream_restriction_flag = 1;
+ vui->motion_vectors_over_pic_boundaries_flag = 1;
+ vui->restricted_ref_pic_lists_flag = 1;
+ vui->log2_max_mv_length_horizontal = 15;
+ vui->log2_max_mv_length_vertical = 15;
+
+ vui->vui_hrd_parameters_present_flag = 1;
+ hrd = &vui->nal_hrd_parameters;
+ hrd->vcl_hrd_parameters_present_flag = 1;
+
+ hrd->initial_cpb_removal_delay_length_minus1 = 31;
+ hrd->au_cpb_removal_delay_length_minus1 = 30;
+ hrd->dpb_output_delay_length_minus1 = 30;
+
+ hrd->bit_rate_scale = ffs(channel->bitrate_peak) - 6;
+ hrd->vcl_hrd[0].bit_rate_value_minus1[0] =
+ (channel->bitrate_peak >> (6 + hrd->bit_rate_scale)) - 1;
+
+ cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size) * 1000;
+ hrd->cpb_size_scale = ffs(cpb_size) - 4;
+ hrd->vcl_hrd[0].cpb_size_value_minus1[0] = (cpb_size >> (4 + hrd->cpb_size_scale)) - 1;
+
+ hrd->vcl_hrd[0].cbr_flag[0] = !v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
+
size = nal_hevc_write_sps(&dev->plat_dev->dev, dest, n, sps);
kfree(sps);
@@ -2185,6 +2316,15 @@ static irqreturn_t allegro_irq_thread(int irq, void *data)
{
struct allegro_dev *dev = data;
+ /*
+ * The firmware is initialized after the mailbox is set up. We further
+ * check the AL5_ITC_CPU_IRQ_STA register to see if the firmware actually
+ * triggered the interrupt. Although this should not happen, make sure
+ * that we ignore interrupts if the mailbox is not initialized.
+ */
+ if (!dev->mbox_status)
+ return IRQ_NONE;
+
allegro_mbox_notify(dev->mbox_status);
return IRQ_HANDLED;
@@ -2384,6 +2524,8 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);
+ v4l2_ctrl_grab(channel->encoder_buffer, false);
+
if (channel->user_id != -1) {
clear_bit(channel->user_id, &dev->channel_user_ids);
channel->user_id = -1;
@@ -2450,6 +2592,8 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);
+ v4l2_ctrl_grab(channel->encoder_buffer, true);
+
reinit_completion(&channel->completion);
allegro_mcu_send_create_channel(dev, channel);
timeout = wait_for_completion_timeout(&channel->completion,
@@ -2833,6 +2977,10 @@ static int allegro_try_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
allegro_clamp_bitrate(channel, ctrl);
break;
+ case V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER:
+ if (!channel->dev->has_encoder_buffer)
+ ctrl->val = 0;
+ break;
}
return 0;
@@ -2873,6 +3021,16 @@ static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
.s_ctrl = allegro_s_ctrl,
};
+static const struct v4l2_ctrl_config allegro_encoder_buffer_ctrl_config = {
+ .id = V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER,
+ .name = "Encoder Buffer Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
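For illustration, a minimal userspace sketch of driving this vendor control through the standard V4L2 control interface; it assumes V4L2_CID_USER_ALLEGRO_BASE is available from the UAPI headers (the control ID mirrors the driver-side define above) and skips the device discovery a real application would do:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    #define ALLEGRO_CID_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0)

    /* Enable (1) or disable (0) the encoder buffer on a video node. */
    static int set_encoder_buffer(const char *devnode, int enable)
    {
            struct v4l2_control ctrl = {
                    .id = ALLEGRO_CID_ENCODER_BUFFER,
                    .value = enable,
            };
            int fd, ret;

            fd = open(devnode, O_RDWR);
            if (fd < 0)
                    return -1;
            ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
            close(fd);
            return ret;
    }

Calling set_encoder_buffer("/dev/video0", 0), for example, requests encoding without the on-chip buffer, which ends up as encoder_buffer_enabled = 0 in fill_create_channel_param() above.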
+
static int allegro_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
@@ -3024,6 +3182,8 @@ static int allegro_open(struct file *file)
V4L2_CID_MPEG_VIDEO_GOP_SIZE,
0, ALLEGRO_GOP_SIZE_MAX,
1, ALLEGRO_GOP_SIZE_DEFAULT);
+ channel->encoder_buffer = v4l2_ctrl_new_custom(handler,
+ &allegro_encoder_buffer_ctrl_config, NULL);
v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -3504,6 +3664,11 @@ static int allegro_mcu_hw_init(struct allegro_dev *dev,
return -EIO;
}
+ err = allegro_encoder_buffer_init(dev, &dev->encoder_buffer);
+ dev->has_encoder_buffer = (err == 0);
+ if (!dev->has_encoder_buffer)
+ v4l2_info(&dev->v4l2_dev, "encoder buffer not available\n");
+
allegro_mcu_enable_interrupts(dev);
/* The mcu sends INIT after reset. */
@@ -3591,11 +3756,16 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
v4l2_info(&dev->v4l2_dev,
"using mcu firmware version '%s'\n", dev->fw_info->version);
+ pm_runtime_enable(&dev->plat_dev->dev);
+ err = pm_runtime_resume_and_get(&dev->plat_dev->dev);
+ if (err)
+ goto err_release_firmware_codec;
+
/* Ensure that the mcu is sleeping at the reset vector */
err = allegro_mcu_reset(dev);
if (err) {
v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
- goto err_release_firmware_codec;
+ goto err_suspend;
}
allegro_copy_firmware(dev, fw->data, fw->size);
@@ -3623,6 +3793,8 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
"allegro codec registered as /dev/video%d\n",
dev->video_dev.num);
+ dev->initialized = true;
+
release_firmware(fw_codec);
release_firmware(fw);
@@ -3635,6 +3807,9 @@ err_mcu_hw_deinit:
allegro_mcu_hw_deinit(dev);
err_free_fw_codec:
allegro_free_fw_codec(dev);
+err_suspend:
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
err_release_firmware_codec:
release_firmware(fw_codec);
err_release_firmware:
@@ -3669,6 +3844,8 @@ static int allegro_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
+ dev->initialized = false;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!res) {
dev_err(&pdev->dev,
@@ -3707,6 +3884,18 @@ static int allegro_probe(struct platform_device *pdev)
return PTR_ERR(dev->sram);
}
+ dev->settings = syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(dev->settings))
+ dev_warn(&pdev->dev, "failed to open settings\n");
+
+ dev->clk_core = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(dev->clk_core))
+ return PTR_ERR(dev->clk_core);
+
+ dev->clk_mcu = devm_clk_get(&pdev->dev, "mcu_clk");
+ if (IS_ERR(dev->clk_mcu))
+ return PTR_ERR(dev->clk_mcu);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -3739,17 +3928,75 @@ static int allegro_remove(struct platform_device *pdev)
{
struct allegro_dev *dev = platform_get_drvdata(pdev);
- video_unregister_device(&dev->video_dev);
- if (dev->m2m_dev)
- v4l2_m2m_release(dev->m2m_dev);
- allegro_mcu_hw_deinit(dev);
- allegro_free_fw_codec(dev);
+ if (dev->initialized) {
+ video_unregister_device(&dev->video_dev);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ allegro_mcu_hw_deinit(dev);
+ allegro_free_fw_codec(dev);
+ }
+
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
v4l2_device_unregister(&dev->v4l2_dev);
return 0;
}
+static int allegro_runtime_resume(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+ struct regmap *settings = dev->settings;
+ unsigned int clk_mcu;
+ unsigned int clk_core;
+ int err;
+
+ if (!settings)
+ return -EINVAL;
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+ err = regmap_read(settings, VCU_CORE_CLK, &clk_core);
+ if (err < 0)
+ return err;
+ err = clk_set_rate(dev->clk_core, MHZ_TO_HZ(clk_core));
+ if (err < 0)
+ return err;
+ err = clk_prepare_enable(dev->clk_core);
+ if (err)
+ return err;
+
+ err = regmap_read(settings, VCU_MCU_CLK, &clk_mcu);
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_set_rate(dev->clk_mcu, MHZ_TO_HZ(clk_mcu));
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_prepare_enable(dev->clk_mcu);
+ if (err)
+ goto disable_clk_core;
+
+#undef MHZ_TO_HZ
+
+ return 0;
+
+disable_clk_core:
+ clk_disable_unprepare(dev->clk_core);
+
+ return err;
+}
+
+static int allegro_runtime_suspend(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+
+ clk_disable_unprepare(dev->clk_mcu);
+ clk_disable_unprepare(dev->clk_core);
+
+ return 0;
+}
+
static const struct of_device_id allegro_dt_ids[] = {
{ .compatible = "allegro,al5e-1.1" },
{ /* sentinel */ }
@@ -3757,12 +4004,18 @@ static const struct of_device_id allegro_dt_ids[] = {
MODULE_DEVICE_TABLE(of, allegro_dt_ids);
+static const struct dev_pm_ops allegro_pm_ops = {
+ .runtime_resume = allegro_runtime_resume,
+ .runtime_suspend = allegro_runtime_suspend,
+};
+
static struct platform_driver allegro_driver = {
.probe = allegro_probe,
.remove = allegro_remove,
.driver = {
.name = "allegro",
.of_match_table = of_match_ptr(allegro_dt_ids),
+ .pm = &allegro_pm_ops,
},
};
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.c b/drivers/media/platform/allegro-dvt/allegro-mail.c
index 7e08c5050f2e..16effad10746 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.c
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.c
@@ -49,11 +49,11 @@ allegro_enc_init(u32 *dst, struct mcu_msg_init_request *msg)
dst[i++] = msg->reserved0;
dst[i++] = msg->suballoc_dma;
dst[i++] = msg->suballoc_size;
- dst[i++] = msg->l2_cache[0];
- dst[i++] = msg->l2_cache[1];
- dst[i++] = msg->l2_cache[2];
+ dst[i++] = msg->encoder_buffer_size;
+ dst[i++] = msg->encoder_buffer_color_depth;
+ dst[i++] = msg->num_cores;
if (version >= MCU_MSG_VERSION_2019_2) {
- dst[i++] = -1;
+ dst[i++] = msg->clk_rate;
dst[i++] = 0;
}
@@ -146,13 +146,10 @@ allegro_encode_config_blob(u32 *dst, struct create_channel_param *param)
FIELD_PREP(GENMASK(7, 0), param->tc_offset);
dst[i++] = param->unknown11;
dst[i++] = param->unknown12;
- if (version >= MCU_MSG_VERSION_2019_2)
- dst[i++] = param->num_slices;
- else
- dst[i++] = FIELD_PREP(GENMASK(31, 16), param->prefetch_auto) |
- FIELD_PREP(GENMASK(15, 0), param->num_slices);
- dst[i++] = param->prefetch_mem_offset;
- dst[i++] = param->prefetch_mem_size;
+ dst[i++] = param->num_slices;
+ dst[i++] = param->encoder_buffer_offset;
+ dst[i++] = param->encoder_buffer_enabled;
+
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->clip_vrt_range) |
FIELD_PREP(GENMASK(15, 0), param->clip_hrz_range);
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->me_range[1]) |
@@ -429,8 +426,8 @@ allegro_dec_encode_frame(struct mcu_msg_encode_frame_response *msg, u32 *src)
msg->frame_tag_size = src[i++];
msg->stuffing = src[i++];
msg->filler = src[i++];
- msg->num_column = FIELD_GET(GENMASK(31, 16), src[i]);
- msg->num_row = FIELD_GET(GENMASK(15, 0), src[i++]);
+ msg->num_row = FIELD_GET(GENMASK(31, 16), src[i]);
+ msg->num_column = FIELD_GET(GENMASK(15, 0), src[i++]);
msg->num_ref_idx_l1 = FIELD_GET(GENMASK(31, 24), src[i]);
msg->num_ref_idx_l0 = FIELD_GET(GENMASK(23, 16), src[i]);
msg->qp = FIELD_GET(GENMASK(15, 0), src[i++]);
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.h b/drivers/media/platform/allegro-dvt/allegro-mail.h
index 2c7bc509eac3..a5686058d754 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.h
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.h
@@ -37,7 +37,10 @@ struct mcu_msg_init_request {
u32 reserved0; /* maybe a unused channel id */
u32 suballoc_dma;
u32 suballoc_size;
- s32 l2_cache[3];
+ s32 encoder_buffer_size;
+ s32 encoder_buffer_color_depth;
+ s32 num_cores;
+ s32 clk_rate;
};
struct mcu_msg_init_response {
@@ -79,9 +82,8 @@ struct create_channel_param {
u32 unknown11;
u32 unknown12;
u16 num_slices;
- u16 prefetch_auto;
- u32 prefetch_mem_offset;
- u32 prefetch_mem_size;
+ u32 encoder_buffer_offset;
+ u32 encoder_buffer_enabled;
u16 clip_hrz_range;
u16 clip_vrt_range;
u16 me_range[4];
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.c b/drivers/media/platform/allegro-dvt/nal-h264.c
index 0ab2fcbee1b9..32663766340f 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.c
+++ b/drivers/media/platform/allegro-dvt/nal-h264.c
@@ -34,80 +34,6 @@ enum nal_unit_type {
FILLER_DATA = 12,
};
-/**
- * nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
- * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
- *
- * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
- * in Rec. ITU-T H.264 (04/2017) A.2.
- *
- * Return: the profile_idc for the passed level
- */
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- return 66;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- return 77;
- case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
- return 88;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- return 100;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * nal_h264_level_from_v4l2() - Get level_idc for v4l2 h264 level
- * @level: the level as &enum v4l2_mpeg_video_h264_level
- *
- * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
- * Rec. ITU-T H.264 (04/2017) A.3.2.
- *
- * Return: the level_idc for the passed level
- */
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
-{
- switch (level) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- return 10;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- return 9;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- return 11;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- return 12;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- return 13;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- return 20;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- return 21;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- return 22;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- return 30;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- return 31;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- return 32;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- return 40;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
- return 41;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
- return 42;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
- return 50;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
- return 51;
- default:
- return -EINVAL;
- }
-}
-
static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.h b/drivers/media/platform/allegro-dvt/nal-h264.h
index a19634fe8c0b..34db07cda652 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.h
+++ b/drivers/media/platform/allegro-dvt/nal-h264.h
@@ -8,8 +8,11 @@
#ifndef __NAL_H264_H__
#define __NAL_H264_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
/*
* struct nal_h264_hrd_parameters - HRD parameters
@@ -187,8 +190,201 @@ struct nal_h264_pps {
};
};
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile);
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level);
+/**
+ * nal_h264_profile() - Get profile_idc for v4l2 h264 profile
+ * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
+ * in Rec. ITU-T H.264 (04/2017) A.2.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+static inline int nal_h264_profile(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return 88;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_level() - Get level_idc for v4l2 h264 level
+ * @level: the level as &enum v4l2_mpeg_video_h264_level
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
+ * Rec. ITU-T H.264 (04/2017) A.3.2.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_h264_level(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 9;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_full_range() - Get video_full_range_flag for v4l2 quantization
+ * @quantization: the quantization type as &enum v4l2_quantization
+ *
+ * Convert the &enum v4l2_quantization to video_full_range_flag as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the video_full_range_flag value for the passed quantization
+ */
+static inline int nal_h264_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * nal_h264_color_primaries() - Get color_primaries for v4l2 colorspace
+ * @colorspace: the color space as &enum v4l2_colorspace
+ *
+ * Convert the &enum v4l2_colorspace to color_primaries as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the color_primaries value for the passed colorspace
+ */
+static inline int nal_h264_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_transfer_characteristics() - Get transfer_characteristics for v4l2 xfer_func
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @xfer_func: the transfer function as &enum v4l2_xfer_func
+ *
+ * Convert the &enum v4l2_xfer_func to transfer_characteristics as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the transfer_characteristics value for the passed transfer function
+ */
+static inline int nal_h264_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_matrix_coeffs() - Get matrix_coefficients for v4l2 ycbcr_encoding
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @ycbcr_encoding: the ycbcr encoding as &enum v4l2_ycbcr_encoding
+ *
+ * Convert the &enum v4l2_ycbcr_encoding to matrix_coefficients as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the matrix_coefficients value for the passed encoding
+ */
+static inline int nal_h264_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
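As a quick sanity check of the helpers above: a channel captured as V4L2_COLORSPACE_REC709 with default transfer function and Y'CbCr encoding ends up with colour_primaries = 1, transfer_characteristics = 6 and matrix_coefficients = 1 in the SPS VUI, assuming the usual V4L2 default mappings resolve REC709 to V4L2_XFER_FUNC_709 and V4L2_YCBCR_ENC_709.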
ssize_t nal_h264_write_sps(const struct device *dev,
void *dest, size_t n, struct nal_h264_sps *sps);
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.c b/drivers/media/platform/allegro-dvt/nal-hevc.c
index 15a352e45831..9cdf2756e0a3 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.c
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.c
@@ -35,76 +35,6 @@ enum nal_unit_type {
FD_NUT = 38,
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
- return 1;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
- return 2;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
- return 3;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_profile_from_v4l2);
-
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier)
-{
- switch (tier) {
- case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
- return 0;
- case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_tier_from_v4l2);
-
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level)
-{
- /*
- * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
- * shall be set equal to a value of 30 times the level number
- * specified in Table A.6.
- */
- int factor = 30 / 10;
-
- switch (level) {
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
- return factor * 10;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
- return factor * 20;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
- return factor * 21;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
- return factor * 30;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
- return factor * 31;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
- return factor * 40;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
- return factor * 41;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
- return factor * 50;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
- return factor * 51;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
- return factor * 52;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
- return factor * 60;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
- return factor * 61;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
- return factor * 62;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_level_from_v4l2);
-
static void nal_hevc_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
@@ -277,6 +207,136 @@ static void nal_hevc_rbsp_vps(struct rbsp *rbsp, struct nal_hevc_vps *vps)
rbsp_unsupported(rbsp);
}
+static void nal_hevc_rbsp_sub_layer_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_sub_layer_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int cpb_cnt = 1;
+
+ for (i = 0; i < cpb_cnt; i++) {
+ rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
+ rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
+ rbsp_bit(rbsp, &hrd->cbr_flag[i]);
+ }
+}
+
+static void nal_hevc_rbsp_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int max_num_sub_layers_minus_1 = 0;
+
+ rbsp_bit(rbsp, &hrd->nal_hrd_parameters_present_flag);
+ rbsp_bit(rbsp, &hrd->vcl_hrd_parameters_present_flag);
+ if (hrd->nal_hrd_parameters_present_flag || hrd->vcl_hrd_parameters_present_flag) {
+ rbsp_bit(rbsp, &hrd->sub_pic_hrd_params_present_flag);
+ if (hrd->sub_pic_hrd_params_present_flag) {
+ rbsp_bits(rbsp, 8, &hrd->tick_divisor_minus2);
+ rbsp_bits(rbsp, 5, &hrd->du_cpb_removal_delay_increment_length_minus1);
+ rbsp_bit(rbsp, &hrd->sub_pic_cpb_params_in_pic_timing_sei_flag);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_du_length_minus1);
+ }
+ rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);
+ if (hrd->sub_pic_hrd_params_present_flag)
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_du_scale);
+ rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->au_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
+ }
+ for (i = 0; i <= max_num_sub_layers_minus_1; i++) {
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_general_flag[i]);
+ if (!hrd->fixed_pic_rate_general_flag[i])
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_within_cvs_flag[i]);
+ if (hrd->fixed_pic_rate_within_cvs_flag[i])
+ rbsp_uev(rbsp, &hrd->elemental_duration_in_tc_minus1[i]);
+ else
+ rbsp_bit(rbsp, &hrd->low_delay_hrd_flag[i]);
+ if (!hrd->low_delay_hrd_flag[i])
+ rbsp_uev(rbsp, &hrd->cpb_cnt_minus1[i]);
+ if (hrd->nal_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ if (hrd->vcl_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ }
+}
+
+static void nal_hevc_rbsp_vui_parameters(struct rbsp *rbsp,
+ struct nal_hevc_vui_parameters *vui)
+{
+ if (!vui) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
+ if (vui->aspect_ratio_info_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255) {
+ rbsp_bits(rbsp, 16, &vui->sar_width);
+ rbsp_bits(rbsp, 16, &vui->sar_height);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->overscan_info_present_flag);
+ if (vui->overscan_info_present_flag)
+ rbsp_bit(rbsp, &vui->overscan_appropriate_flag);
+
+ rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
+ if (vui->video_signal_type_present_flag) {
+ rbsp_bits(rbsp, 3, &vui->video_format);
+ rbsp_bit(rbsp, &vui->video_full_range_flag);
+
+ rbsp_bit(rbsp, &vui->colour_description_present_flag);
+ if (vui->colour_description_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->colour_primaries);
+ rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
+ rbsp_bits(rbsp, 8, &vui->matrix_coeffs);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
+ if (vui->chroma_loc_info_present_flag) {
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ rbsp_bit(rbsp, &vui->neutral_chroma_indication_flag);
+ rbsp_bit(rbsp, &vui->field_seq_flag);
+ rbsp_bit(rbsp, &vui->frame_field_info_present_flag);
+ rbsp_bit(rbsp, &vui->default_display_window_flag);
+ if (vui->default_display_window_flag) {
+ rbsp_uev(rbsp, &vui->def_disp_win_left_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_right_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_top_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_bottom_offset);
+ }
+
+ rbsp_bit(rbsp, &vui->vui_timing_info_present_flag);
+ if (vui->vui_timing_info_present_flag) {
+ rbsp_bits(rbsp, 32, &vui->vui_num_units_in_tick);
+ rbsp_bits(rbsp, 32, &vui->vui_time_scale);
+ rbsp_bit(rbsp, &vui->vui_poc_proportional_to_timing_flag);
+ if (vui->vui_poc_proportional_to_timing_flag)
+ rbsp_uev(rbsp, &vui->vui_num_ticks_poc_diff_one_minus1);
+ rbsp_bit(rbsp, &vui->vui_hrd_parameters_present_flag);
+ if (vui->vui_hrd_parameters_present_flag)
+ nal_hevc_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
+ }
+
+ rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
+ if (vui->bitstream_restriction_flag) {
+ rbsp_bit(rbsp, &vui->tiles_fixed_structure_flag);
+ rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
+ rbsp_bit(rbsp, &vui->restricted_ref_pic_lists_flag);
+ rbsp_uev(rbsp, &vui->min_spatial_segmentation_idc);
+ rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
+ rbsp_uev(rbsp, &vui->max_bits_per_min_cu_denom);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_vertical);
+ }
+}
+
static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
{
unsigned int i;
@@ -345,7 +405,7 @@ static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
rbsp_bit(rbsp, &sps->strong_intra_smoothing_enabled_flag);
rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
if (sps->vui_parameters_present_flag)
- rbsp_unsupported(rbsp);
+ nal_hevc_rbsp_vui_parameters(rbsp, &sps->vui);
rbsp_bit(rbsp, &sps->extension_present_flag);
if (sps->extension_present_flag) {
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.h b/drivers/media/platform/allegro-dvt/nal-hevc.h
index c09bbe5446aa..eb46f12aae80 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.h
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.h
@@ -8,9 +8,11 @@
#ifndef __NAL_HEVC_H__
#define __NAL_HEVC_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
struct nal_hevc_profile_tier_level {
unsigned int general_profile_space;
@@ -318,16 +320,183 @@ struct nal_hevc_pps {
};
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile);
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier);
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level);
+/**
+ * nal_hevc_profile() - Get profile_idc for v4l2 hevc profile
+ * @profile: the profile as &enum v4l2_mpeg_video_hevc_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_profile to profile_idc as specified
+ * in Rec. ITU-T H.265 (02/2018) A.3.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+static inline int nal_hevc_profile(enum v4l2_mpeg_video_hevc_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ return 1;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return 2;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_tier() - Get tier_flag for v4l2 hevc tier
+ * @tier: the tier as &enum v4l2_mpeg_video_hevc_tier
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_tier to tier_flag as specified
+ * in Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the tier_flag for the passed tier
+ */
+static inline int nal_hevc_tier(enum v4l2_mpeg_video_hevc_tier tier)
+{
+ switch (tier) {
+ case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
+ return 0;
+ case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_level() - Get level_idc for v4l2 hevc level
+ * @level: the level as &enum v4l2_mpeg_video_hevc_level
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_level to level_idc as specified in
+ * Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_hevc_level(enum v4l2_mpeg_video_hevc_level level)
+{
+ /*
+ * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
+ * shall be set equal to a value of 30 times the level number
+ * specified in Table A.6.
+ */
+ int factor = 30 / 10;
+
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return factor * 10;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return factor * 20;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return factor * 21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return factor * 30;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return factor * 31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return factor * 40;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return factor * 41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return factor * 50;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return factor * 51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return factor * 52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return factor * 60;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return factor * 61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return factor * 62;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int nal_hevc_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int nal_hevc_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
-int nal_range_from_v4l2(enum v4l2_quantization quantization);
-int nal_color_primaries_from_v4l2(enum v4l2_colorspace colorspace);
-int nal_transfer_characteristics_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_xfer_func xfer_func);
-int nal_matrix_coeffs_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_ycbcr_encoding ycbcr_encoding);
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
ssize_t nal_hevc_write_vps(const struct device *dev,
void *dest, size_t n, struct nal_hevc_vps *vps);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 1c9cb9e05fdf..2dfae9bc0bba 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2297,7 +2297,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
dev_dbg(dev, "vpfe_get_pdata\n");
- v4l2_async_notifier_init(&vpfe->notifier);
+ v4l2_async_nf_init(&vpfe->notifier);
if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
return dev->platform_data;
@@ -2365,9 +2365,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
goto cleanup;
}
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpfe->notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier,
+ of_fwnode_handle(rem),
+ struct v4l2_async_subdev);
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
@@ -2377,7 +2378,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
return pdata;
cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
of_node_put(endpoint);
return NULL;
}
@@ -2392,7 +2393,6 @@ static int vpfe_probe(struct platform_device *pdev)
struct vpfe_config *vpfe_cfg;
struct vpfe_device *vpfe;
struct vpfe_ccdc *ccdc;
- struct resource *res;
int ret;
vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
@@ -2410,8 +2410,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->cfg = vpfe_cfg;
ccdc = &vpfe->ccdc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccdc->ccdc_cfg.base_addr)) {
ret = PTR_ERR(ccdc->ccdc_cfg.base_addr);
goto probe_out_cleanup;
@@ -2465,7 +2464,7 @@ static int vpfe_probe(struct platform_device *pdev)
}
vpfe->notifier.ops = &vpfe_async_ops;
- ret = v4l2_async_notifier_register(&vpfe->v4l2_dev, &vpfe->notifier);
+ ret = v4l2_async_nf_register(&vpfe->v4l2_dev, &vpfe->notifier);
if (ret) {
vpfe_err(vpfe, "Error registering async notifier\n");
ret = -EINVAL;
@@ -2477,7 +2476,7 @@ static int vpfe_probe(struct platform_device *pdev)
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe->v4l2_dev);
probe_out_cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
return ret;
}
@@ -2490,8 +2489,8 @@ static int vpfe_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&vpfe->notifier);
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_unregister(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
v4l2_device_unregister(&vpfe->v4l2_dev);
video_unregister_device(&vpfe->video_dev);
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 7bb6babdcade..cad3f97515ae 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -23,6 +23,8 @@
#include <linux/videodev2.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -201,6 +203,14 @@ struct aspeed_video_buffer {
struct list_head link;
};
+struct aspeed_video_perf {
+ ktime_t last_sample;
+ u32 totaltime;
+ u32 duration;
+ u32 duration_min;
+ u32 duration_max;
+};
+
#define to_aspeed_video_buffer(x) \
container_of((x), struct aspeed_video_buffer, vb)
@@ -242,6 +252,8 @@ struct aspeed_video {
unsigned int frame_left;
unsigned int frame_right;
unsigned int frame_top;
+
+ struct aspeed_video_perf perf;
};
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
@@ -422,6 +434,21 @@ static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
}
}
+/* Just update the JPEG DCT table for 4:2:0 vs 4:4:4 subsampling */
+static void aspeed_video_update_jpeg_table(u32 *table, bool yuv420)
+{
+ int i;
+ unsigned int base;
+
+ for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
+ base = 256 * i; /* AST HW requires this header spacing */
+ base += ASPEED_VIDEO_JPEG_HEADER_SIZE +
+ ASPEED_VIDEO_JPEG_DCT_SIZE;
+
+ table[base + 2] = (yuv420) ? 0x00220103 : 0x00110103;
+ }
+}
+
static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
u32 bits)
{
@@ -450,6 +477,16 @@ static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
readl(video->base + reg));
}
+static void update_perf(struct aspeed_video_perf *p)
+{
+ p->duration =
+ ktime_to_ms(ktime_sub(ktime_get(), p->last_sample));
+ p->totaltime += p->duration;
+
+ p->duration_max = max(p->duration, p->duration_max);
+ p->duration_min = min(p->duration, p->duration_min);
+}
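A short worked example of what the debugfs code further down derives from these counters (illustrative numbers): after 300 completed frames with totaltime = 10000 ms, the average frame duration is 10000 / 300 = 33 ms and the reported FPS is 1000 / 33 = 30; the average is only meaningful once at least one frame has completed.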
+
static int aspeed_video_start_frame(struct aspeed_video *video)
{
dma_addr_t addr;
@@ -488,6 +525,8 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_COMP_COMPLETE);
+ video->perf.last_sample = ktime_get();
+
aspeed_video_update(video, VE_SEQ_CTRL, 0,
VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);
@@ -564,6 +603,12 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS);
/*
+ * Hardware sometimes asserts interrupts that we haven't actually
+ * enabled; ignore them if so.
+ */
+ sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL);
+
+ /*
* Resolution changed or signal was lost; reset the engine and
* re-initialize
*/
@@ -597,6 +642,8 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 frame_size = aspeed_video_read(video,
video->comp_size_read);
+ update_perf(&video->perf);
+
spin_lock(&video->lock);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
buf = list_first_entry_or_null(&video->buffers,
@@ -629,16 +676,6 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
- /*
- * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
- * are disabled in the VE_INTERRUPT_CTRL register so clear them to
- * prevent unnecessary interrupt calls.
- */
- if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
- sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
- if (sts & VE_INTERRUPT_FRAME_COMPLETE)
- sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
-
return sts ? IRQ_NONE : IRQ_HANDLED;
}
@@ -764,6 +801,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
det->width = MIN_WIDTH;
det->height = MIN_HEIGHT;
video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ memset(&video->perf, 0, sizeof(video->perf));
do {
if (tries) {
@@ -1293,7 +1331,7 @@ static void aspeed_video_update_jpeg_quality(struct aspeed_video *video)
static void aspeed_video_update_subsampling(struct aspeed_video *video)
{
if (video->jpeg.virt)
- aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
+ aspeed_video_update_jpeg_table(video->jpeg.virt, video->yuv420);
if (video->yuv420)
aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_YUV420);
@@ -1454,6 +1492,8 @@ static int aspeed_video_start_streaming(struct vb2_queue *q,
struct aspeed_video *video = vb2_get_drv_priv(q);
video->sequence = 0;
+ video->perf.duration_max = 0;
+ video->perf.duration_min = 0xffffffff;
rc = aspeed_video_start_frame(video);
if (rc) {
@@ -1521,6 +1561,71 @@ static const struct vb2_ops aspeed_video_vb2_ops = {
.buf_queue = aspeed_video_buf_queue,
};
+#ifdef CONFIG_DEBUG_FS
+static int aspeed_video_debugfs_show(struct seq_file *s, void *data)
+{
+ struct aspeed_video *v = s->private;
+
+ seq_puts(s, "\n");
+
+ seq_printf(s, " %-20s:\t%s\n", "Signal",
+ v->v4l2_input_status ? "Unlock" : "Lock");
+ seq_printf(s, " %-20s:\t%d\n", "Width", v->pix_fmt.width);
+ seq_printf(s, " %-20s:\t%d\n", "Height", v->pix_fmt.height);
+ seq_printf(s, " %-20s:\t%d\n", "FRC", v->frame_rate);
+
+ seq_puts(s, "\n");
+
+ seq_puts(s, "Performance:\n");
+ seq_printf(s, " %-20s:\t%d\n", "Frame#", v->sequence);
+ seq_printf(s, " %-20s:\n", "Frame Duration(ms)");
+ seq_printf(s, " %-18s:\t%d\n", "Now", v->perf.duration);
+ seq_printf(s, " %-18s:\t%d\n", "Min", v->perf.duration_min);
+ seq_printf(s, " %-18s:\t%d\n", "Max", v->perf.duration_max);
+ seq_printf(s, " %-20s:\t%d\n", "FPS", 1000 / (v->perf.totaltime / v->sequence));
+
+ return 0;
+}
+
+static int aspeed_video_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aspeed_video_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations aspeed_video_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_video_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_entry;
+
+static void aspeed_video_debugfs_remove(struct aspeed_video *video)
+{
+ debugfs_remove_recursive(debugfs_entry);
+ debugfs_entry = NULL;
+}
+
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL,
+ video,
+ &aspeed_video_debugfs_ops);
+ if (!debugfs_entry)
+ aspeed_video_debugfs_remove(video);
+
+ return !debugfs_entry ? -EIO : 0;
+}
+#else
+static void aspeed_video_debugfs_remove(struct aspeed_video *video) { }
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
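Assuming DEVICE_NAME expands to the usual "aspeed-video" driver name, these statistics can then be read at runtime from /sys/kernel/debug/aspeed-video once debugfs is mounted; with CONFIG_DEBUG_FS disabled the stubs above compile the feature away.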
+
static int aspeed_video_setup_video(struct aspeed_video *video)
{
const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) |
@@ -1725,6 +1830,10 @@ static int aspeed_video_probe(struct platform_device *pdev)
return rc;
}
+ rc = aspeed_video_debugfs_create(video);
+ if (rc)
+ dev_err(video->dev, "debugfs create failed\n");
+
return 0;
}
@@ -1736,6 +1845,8 @@ static int aspeed_video_remove(struct platform_device *pdev)
aspeed_video_off(video);
+ aspeed_video_debugfs_remove(video);
+
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 136ab7cf36ed..660cd0ab6749 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -123,11 +123,9 @@ static int isc_clk_prepare(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return ret;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
return isc_wait_clk_stable(hw);
}
@@ -138,8 +136,7 @@ static void isc_clk_unprepare(struct clk_hw *hw)
isc_wait_clk_stable(hw);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
}
static int isc_clk_enable(struct clk_hw *hw)
@@ -186,16 +183,13 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
u32 status;
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return 0;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -325,6 +319,9 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
const char *parent_names[3];
int num_parents;
+ if (id == ISC_ISPCK && !isc->ispck_required)
+ return 0;
+
num_parents = of_clk_get_parent_count(np);
if (num_parents < 1 || num_parents > 3)
return -EINVAL;
@@ -2222,8 +2219,8 @@ void isc_subdev_cleanup(struct isc_device *isc)
struct isc_subdev_entity *subdev_entity;
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- v4l2_async_notifier_unregister(&subdev_entity->notifier);
- v4l2_async_notifier_cleanup(&subdev_entity->notifier);
+ v4l2_async_nf_unregister(&subdev_entity->notifier);
+ v4l2_async_nf_cleanup(&subdev_entity->notifier);
}
INIT_LIST_HEAD(&isc->subdev_entities);
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index 19cc60dfcbe0..2bfcb135ef13 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -178,6 +178,7 @@ struct isc_reg_offsets {
* @hclock: Hclock clock input (refer datasheet)
* @ispck: ispck clock (refer datasheet)
* @isc_clks: ISC clocks
+ * @ispck_required: ISC requires ISP Clock initialization
* @dcfg: DMA master configuration, architecture dependent
*
* @dev: Registered device driver
@@ -252,6 +253,7 @@ struct isc_device {
struct clk *hclock;
struct clk *ispck;
struct isc_clk isc_clks[2];
+ bool ispck_required;
u32 dcfg;
struct device *dev;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 095d80c4f59e..4d15814e4481 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1159,12 +1159,11 @@ static int isi_graph_init(struct atmel_isi *isi)
if (!ep)
return -EINVAL;
- v4l2_async_notifier_init(&isi->notifier);
+ v4l2_async_nf_init(&isi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isi->notifier,
- of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&isi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
@@ -1172,10 +1171,10 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.ops = &isi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
+ ret = v4l2_async_nf_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
dev_err(isi->dev, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
return ret;
}
@@ -1327,8 +1326,8 @@ static int atmel_isi_remove(struct platform_device *pdev)
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&isi->notifier);
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_unregister(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
v4l2_device_unregister(&isi->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index b66f1d174e9d..1b2063cce0f7 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -454,6 +454,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
/* sama5d2-isc - 8 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+ /* sama5d2-isc : ISPCK is mandatory */
+ isc->ispck_required = true;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -476,22 +479,6 @@ static int atmel_isc_probe(struct platform_device *pdev)
dev_err(dev, "failed to init isc clock: %d\n", ret);
goto unprepare_hclk;
}
-
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
@@ -512,13 +499,14 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -530,8 +518,8 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -545,19 +533,35 @@ static int atmel_isc_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_request_idle(dev);
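+
+ /* ISPCK is now prepared only after runtime PM is enabled above, since
+ * isc_clk_prepare() resumes the device via runtime PM (see the
+ * atmel-isc-base.c hunk earlier in this patch).
+ */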
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto cleanup_subdev;
+ }
+
+ /* ispck should be greater or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
dev_info(dev, "Microchip ISC version %x\n", ver);
return 0;
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+
cleanup_subdev:
isc_subdev_cleanup(isc);
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
index f2785131ff56..5d1c76f680f3 100644
--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -447,6 +447,9 @@ static int microchip_xisc_probe(struct platform_device *pdev)
/* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
+ /* sama7g5-isc : ISPCK does not exist, ISC is clocked by MCK */
+ isc->ispck_required = false;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -470,25 +473,10 @@ static int microchip_xisc_probe(struct platform_device *pdev)
goto unprepare_hclk;
}
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto unprepare_clk;
+ goto unprepare_hclk;
}
ret = xisc_parse_dt(dev, isc);
@@ -505,13 +493,14 @@ static int microchip_xisc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -523,8 +512,8 @@ static int microchip_xisc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -549,8 +538,6 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index f2b4ddd31177..cc3ebb0d96f6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -279,13 +279,11 @@ static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned char i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2rx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2rx->base))
return PTR_ERR(csi2rx->base);
@@ -401,21 +399,19 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return -EINVAL;
}
- v4l2_async_notifier_init(&csi2rx->notifier);
+ v4l2_async_nf_init(&csi2rx->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi2rx->notifier,
- fwh,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
return PTR_ERR(asd);
csi2rx->notifier.ops = &csi2rx_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&csi2rx->subdev,
- &csi2rx->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi2rx->subdev, &csi2rx->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
return ret;
}
@@ -471,7 +467,7 @@ static int csi2rx_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
err_free_priv:
kfree(csi2rx);
return ret;
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index 5a67fba73ddd..8f8c36056354 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -433,13 +433,11 @@ static const struct v4l2_subdev_ops csi2tx_subdev_ops = {
static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned int i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2tx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2tx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2tx->base))
return PTR_ERR(csi2tx->base);
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 8bc0d8371819..6996d4571e36 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -301,8 +301,7 @@ static int vdoa_probe(struct platform_device *pdev)
return PTR_ERR(vdoa->vdoa_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vdoa->regs = devm_ioremap_resource(vdoa->dev, res);
+ vdoa->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vdoa->regs))
return PTR_ERR(vdoa->regs);
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index bde241c26d79..4c8e31de12b1 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -621,7 +621,6 @@ static int venc_probe(struct platform_device *pdev)
{
const struct platform_device_id *pdev_id;
struct venc_state *venc;
- struct resource *res;
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "No platform data for VENC sub device");
@@ -640,16 +639,12 @@ static int venc_probe(struct platform_device *pdev)
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ venc->venc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(venc->venc_base))
return PTR_ERR(venc->venc_base);
if (venc->venc_type != VPBE_VERSION_1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ venc->vdaccfg_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(venc->vdaccfg_reg))
return PTR_ERR(venc->vdaccfg_reg);
}
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index f1ce10828b8e..5a89d885d0e3 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -425,12 +425,11 @@ EXPORT_SYMBOL(vpif_channel_getfid);
static int vpif_probe(struct platform_device *pdev)
{
- static struct resource *res, *res_irq;
+ static struct resource *res_irq;
struct platform_device *pdev_capture, *pdev_display;
struct device_node *endpoint = NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vpif_base = devm_ioremap_resource(&pdev->dev, res);
+ vpif_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vpif_base))
return PTR_ERR(vpif_base);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index c034e25dd9aa..ae92e2c206d0 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1506,7 +1506,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
struct vpif_capture_chan_config *chan;
unsigned int i;
- v4l2_async_notifier_init(&vpif_obj.notifier);
+ v4l2_async_nf_init(&vpif_obj.notifier);
/*
* DT boot: OF node from parent device contains
@@ -1582,9 +1582,10 @@ vpif_capture_get_pdata(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Remote device %pOF found\n", rem);
sdinfo->name = rem->full_name;
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpif_obj.notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier,
+ of_fwnode_handle(rem),
+ struct
+ v4l2_async_subdev);
if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
@@ -1602,7 +1603,7 @@ done:
err_cleanup:
of_node_put(rem);
of_node_put(endpoint);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return NULL;
}
@@ -1692,8 +1693,8 @@ static __init int vpif_probe(struct platform_device *pdev)
goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
- err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
- &vpif_obj.notifier);
+ err = v4l2_async_nf_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
if (err) {
vpif_err("Error registering async notifier\n");
err = -EINVAL;
@@ -1711,7 +1712,7 @@ vpif_unregister:
vpif_free:
free_vpif_objs();
cleanup:
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return err;
}
@@ -1727,8 +1728,8 @@ static int vpif_remove(struct platform_device *device)
struct channel_obj *ch;
int i;
- v4l2_async_notifier_unregister(&vpif_obj.notifier);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_unregister(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
v4l2_device_unregister(&vpif_obj.v4l2_dev);
kfree(vpif_obj.sd);
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 7000f0bf0b35..d15b991ab17c 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -392,7 +392,6 @@ EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
static int vpss_probe(struct platform_device *pdev)
{
- struct resource *res;
char *platform_name;
if (!pdev->dev.platform_data) {
@@ -413,17 +412,12 @@ static int vpss_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ oper_cfg.vpss_regs_base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(oper_cfg.vpss_regs_base0))
return PTR_ERR(oper_cfg.vpss_regs_base0);
if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
- res);
+ oper_cfg.vpss_regs_base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(oper_cfg.vpss_regs_base1))
return PTR_ERR(oper_cfg.vpss_regs_base1);
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index f49f3322f835..cfd6ae70b8d8 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1137,8 +1137,7 @@ static int gsc_probe(struct platform_device *pdev)
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gsc->regs = devm_ioremap_resource(dev, res);
+ gsc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gsc->regs))
return PTR_ERR(gsc->regs);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index fa648721eaab..544b54e428c9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -464,9 +464,9 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &fmd->subdev_notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&fmd->subdev_notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -557,7 +557,7 @@ rpm_put:
cleanup:
of_node_put(ports);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
pm_runtime_put(fmd->pmf);
return ret;
}
@@ -1481,7 +1481,7 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- v4l2_async_notifier_init(&fmd->subdev_notifier);
+ v4l2_async_nf_init(&fmd->subdev_notifier);
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
if (ret)
@@ -1509,8 +1509,8 @@ static int fimc_md_probe(struct platform_device *pdev)
fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
- ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
- &fmd->subdev_notifier);
+ ret = v4l2_async_nf_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
if (ret)
goto err_clk_p;
}
@@ -1522,7 +1522,7 @@ err_clk_p:
err_attr:
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
err_cleanup:
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
@@ -1542,8 +1542,8 @@ static int fimc_md_remove(struct platform_device *pdev)
return 0;
fimc_md_unregister_clk_provider(fmd);
- v4l2_async_notifier_unregister(&fmd->subdev_notifier);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_unregister(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 32b23329b033..27a214936cb0 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -766,7 +766,6 @@ static int s5pcsis_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
const struct csis_drvdata *drv_data;
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csis_state *state;
int ret = -ENOMEM;
int i;
@@ -800,8 +799,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
if (IS_ERR(state->phy))
return PTR_ERR(state->phy);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
index 755138063ee6..4ca96cf9def7 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/string.h>
@@ -282,6 +283,20 @@ static const unsigned char jpeg_sos_maximal[] = {
0x11, 0x04, 0x11, 0x00, 0x3F, 0x00
};
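+
+/*
+ * Pre-encoded entropy data for a tiny solid-colour frame ("red", per the
+ * name); mxc_jpeg_setup_cfg_stream() copies it in between the SOS segment
+ * and the EOI marker of the generated configuration stream.
+ */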
+static const unsigned char jpeg_image_red[] = {
+ 0xFC, 0x5F, 0xA2, 0xBF, 0xCA, 0x73, 0xFE, 0xFE,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00
+};
+
static const unsigned char jpeg_eoi[] = {
0xFF, 0xD9
};
@@ -575,6 +590,10 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!dst_buf || !src_buf) {
+ dev_err(dev, "No source or destination buffer.\n");
+ goto job_unlock;
+ }
jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf);
if (dec_ret & SLOT_STATUS_ENC_CONFIG_ERR) {
@@ -760,6 +779,9 @@ static unsigned int mxc_jpeg_setup_cfg_stream(void *cfg_stream_vaddr,
sos = (struct mxc_jpeg_sos *)(cfg + offset);
offset += mxc_jpeg_fixup_sos(sos, fourcc);
+ memcpy(cfg + offset, jpeg_image_red, sizeof(jpeg_image_red));
+ offset += sizeof(jpeg_image_red);
+
memcpy(cfg + offset, jpeg_eoi, sizeof(jpeg_eoi));
offset += sizeof(jpeg_eoi);
@@ -795,6 +817,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
img_fmt = mxc_jpeg_fourcc_to_imgfmt(q_data_cap->fmt->fourcc);
desc->stm_ctrl &= ~STM_CTRL_IMAGE_FORMAT(0xF); /* clear image format */
desc->stm_ctrl |= STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->line_pitch = q_data_cap->bytesperline[0];
mxc_jpeg_addrs(desc, dst_buf, src_buf, 0);
mxc_jpeg_set_bufsize(desc, ALIGN(vb2_plane_size(src_buf, 0), 1024));
@@ -821,6 +844,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
cfg_desc->imgsize |= MXC_JPEG_MIN_HEIGHT;
cfg_desc->line_pitch = MXC_JPEG_MIN_WIDTH * 2;
cfg_desc->stm_ctrl = STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV422);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
cfg_desc->stm_bufbase = cfg_stream_handle;
cfg_desc->stm_bufsize = ALIGN(*cfg_size, 1024);
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -864,6 +888,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
cfg_desc->stm_bufsize = 0x0;
cfg_desc->imgsize = 0;
cfg_desc->stm_ctrl = STM_CTRL_CONFIG_MOD(1);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->next_descpt_ptr = 0; /* end of chain */
@@ -878,6 +903,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
dev_err(jpeg->dev, "No valid image format detected\n");
desc->stm_ctrl = STM_CTRL_CONFIG_MOD(0) |
STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
mxc_jpeg_addrs(desc, src_buf, dst_buf, 0);
dev_dbg(jpeg->dev, "cfg_desc:\n");
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -933,11 +959,6 @@ static void mxc_jpeg_device_run(void *priv)
return;
}
- /*
- * TODO: this reset should be removed, once we figure out
- * how to overcome hardware issues both on encoder and decoder
- */
- mxc_jpeg_sw_reset(reg);
mxc_jpeg_enable(reg);
mxc_jpeg_set_l_endian(reg, 1);
@@ -1058,10 +1079,17 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, q->type);
+ int ret;
dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx);
q_data->sequence = 0;
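+
+ /* Keep the block powered (and, via mxc_jpeg_runtime_resume(), clocked)
+ * for the whole streaming session; the matching put is done in
+ * mxc_jpeg_stop_streaming().
+ */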
+ ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev);
+ if (ret < 0) {
+ dev_err(ctx->mxc_jpeg->dev, "Failed to power up jpeg\n");
+ return ret;
+ }
+
return 0;
}
@@ -1079,9 +1107,10 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
- return;
+ break;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
+ pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
}
static int mxc_jpeg_valid_comp_id(struct device *dev,
@@ -1941,8 +1970,7 @@ static int mxc_jpeg_attach_pm_domains(struct mxc_jpeg_dev *jpeg)
jpeg->pd_link[i] = device_link_add(dev, jpeg->pd_dev[i],
DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
+ DL_FLAG_PM_RUNTIME);
if (!jpeg->pd_link[i]) {
ret = -EINVAL;
goto fail;
@@ -1959,7 +1987,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
{
struct mxc_jpeg_dev *jpeg;
struct device *dev = &pdev->dev;
- struct resource *res;
int dec_irq;
int ret;
int mode;
@@ -1982,8 +2009,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
goto err_irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->base_reg = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->base_reg))
return PTR_ERR(jpeg->base_reg);
@@ -2007,6 +2033,19 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dev = dev;
jpeg->mode = mode;
+ /* Get clocks */
+ jpeg->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(jpeg->clk_ipg)) {
+ dev_err(dev, "failed to get clock: ipg\n");
+ goto err_clk;
+ }
+
+ jpeg->clk_per = devm_clk_get(dev, "per");
+ if (IS_ERR(jpeg->clk_per)) {
+ dev_err(dev, "failed to get clock: per\n");
+ goto err_clk;
+ }
+
ret = mxc_jpeg_attach_pm_domains(jpeg);
if (ret < 0) {
dev_err(dev, "failed to attach power domains %d\n", ret);
@@ -2075,6 +2114,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dec_vdev->minor);
platform_set_drvdata(pdev, jpeg);
+ pm_runtime_enable(dev);
return 0;
@@ -2088,10 +2128,55 @@ err_m2m:
v4l2_device_unregister(&jpeg->v4l2_dev);
err_register:
+ mxc_jpeg_detach_pm_domains(jpeg);
+
err_irq:
+err_clk:
return ret;
}
+#ifdef CONFIG_PM
+static int mxc_jpeg_runtime_resume(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(jpeg->clk_ipg);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: ipg\n");
+ goto err_ipg;
+ }
+
+ ret = clk_prepare_enable(jpeg->clk_per);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: per\n");
+ goto err_per;
+ }
+
+ return 0;
+
+err_per:
+ clk_disable_unprepare(jpeg->clk_ipg);
+err_ipg:
+ return ret;
+}
+
+static int mxc_jpeg_runtime_suspend(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(jpeg->clk_ipg);
+ clk_disable_unprepare(jpeg->clk_per);
+
+ return 0;
+}
+#endif
+
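+/*
+ * With CONFIG_PM disabled, SET_RUNTIME_PM_OPS() expands to nothing, so the
+ * runtime callbacks above are compiled out together with their #ifdef block.
+ */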
+static const struct dev_pm_ops mxc_jpeg_pm_ops = {
+ SET_RUNTIME_PM_OPS(mxc_jpeg_runtime_suspend,
+ mxc_jpeg_runtime_resume, NULL)
+};
+
static int mxc_jpeg_remove(struct platform_device *pdev)
{
unsigned int slot;
@@ -2100,6 +2185,7 @@ static int mxc_jpeg_remove(struct platform_device *pdev)
for (slot = 0; slot < MXC_MAX_SLOTS; slot++)
mxc_jpeg_free_slot_data(jpeg, slot);
+ pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->dec_vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
@@ -2116,6 +2202,7 @@ static struct platform_driver mxc_jpeg_driver = {
.driver = {
.name = "mxc-jpeg",
.of_match_table = mxc_jpeg_match,
+ .pm = &mxc_jpeg_pm_ops,
},
};
module_platform_driver(mxc_jpeg_driver);
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
index 4c210852e876..9fb2a5aaa941 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
@@ -109,6 +109,8 @@ struct mxc_jpeg_dev {
spinlock_t hw_lock; /* hardware access lock */
unsigned int mode;
struct mutex lock; /* v4l2 ioctls serialization */
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct platform_device *pdev;
struct device *dev;
void __iomem *base_reg;
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 4321edc0c23d..723b096fedd1 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1636,7 +1636,6 @@ static int pxp_soft_reset(struct pxp_dev *dev)
static int pxp_probe(struct platform_device *pdev)
{
struct pxp_dev *dev;
- struct resource *res;
struct video_device *vfd;
int irq;
int ret;
@@ -1652,8 +1651,7 @@ static int pxp_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ dev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->mmio))
return PTR_ERR(dev->mmio);
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 9aa374fa8b36..b61b9d9551af 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -544,12 +544,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_i2c_subdev(&mcam->notifier,
- i2c_adapter_id(cam->i2c_adapter),
- ov7670_info.addr,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&mcam->notifier,
+ i2c_adapter_id(cam->i2c_adapter),
+ ov7670_info.addr, struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out_smbus_shutdown;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 58f9463f3b8c..ad4a7922d0d7 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1877,7 +1877,7 @@ int mccic_register(struct mcam_camera *cam)
cam->mbus_code = mcam_def_mbus_code;
cam->notifier.ops = &mccic_notify_ops;
- ret = v4l2_async_notifier_register(&cam->v4l2_dev, &cam->notifier);
+ ret = v4l2_async_nf_register(&cam->v4l2_dev, &cam->notifier);
if (ret < 0) {
cam_warn(cam, "failed to register a sensor notifier");
goto out;
@@ -1914,9 +1914,9 @@ int mccic_register(struct mcam_camera *cam)
return 0;
out:
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
return ret;
}
EXPORT_SYMBOL_GPL(mccic_register);
@@ -1936,9 +1936,9 @@ void mccic_shutdown(struct mcam_camera *cam)
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index f2f09cea751d..343ab4f7d807 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -239,10 +239,10 @@ static int mmpcam_probe(struct platform_device *pdev)
if (!ep)
return -ENODEV;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&mcam->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&mcam->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
index a1393fefa8ae..ccda18e5a377 100644
--- a/drivers/media/platform/meson/ge2d/ge2d.c
+++ b/drivers/media/platform/meson/ge2d/ge2d.c
@@ -779,11 +779,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
* If the rotation parameter changes the OUTPUT frames
* parameters, take them in account
*/
- if (fmt.width != ctx->out.pix_fmt.width ||
- fmt.height != ctx->out.pix_fmt.width ||
- fmt.bytesperline > ctx->out.pix_fmt.bytesperline ||
- fmt.sizeimage > ctx->out.pix_fmt.sizeimage)
- ctx->out.pix_fmt = fmt;
+ ctx->out.pix_fmt = fmt;
break;
}
@@ -926,7 +922,6 @@ static int ge2d_probe(struct platform_device *pdev)
struct reset_control *rst;
struct video_device *vfd;
struct meson_ge2d *ge2d;
- struct resource *res;
void __iomem *regs;
int ret = 0;
int irq;
@@ -941,8 +936,7 @@ static int ge2d_probe(struct platform_device *pdev)
ge2d->dev = &pdev->dev;
mutex_init(&ge2d->mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(ge2d->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index a89c7b206eef..af994b9913a6 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -1341,7 +1341,6 @@ static inline void mtk_jpeg_clk_release(struct mtk_jpeg_dev *jpeg)
static int mtk_jpeg_probe(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg;
- struct resource *res;
int jpeg_irq;
int ret;
@@ -1355,8 +1354,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
jpeg->variant = of_device_get_match_data(jpeg->dev);
INIT_DELAYED_WORK(&jpeg->job_timeout_work, mtk_jpeg_job_timeout_work);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->reg_base)) {
ret = PTR_ERR(jpeg->reg_base);
return ret;
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index 4618d43dbbc8..ca8e9e7a9c4e 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -7,10 +7,13 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
vdec/vdec_vp8_if.o \
vdec/vdec_vp9_if.o \
+ vdec/vdec_h264_req_if.o \
mtk_vcodec_dec_drv.o \
vdec_drv_if.o \
vdec_vpu_if.o \
mtk_vcodec_dec.o \
+ mtk_vcodec_dec_stateful.o \
+ mtk_vcodec_dec_stateless.o \
mtk_vcodec_dec_pm.o \
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 56d86e59421e..2b334a8a81c6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -16,68 +16,18 @@
#include "vdec_drv_if.h"
#include "mtk_vcodec_dec_pm.h"
-#define OUT_FMT_IDX 0
-#define CAP_FMT_IDX 3
-
-#define MTK_VDEC_MIN_W 64U
-#define MTK_VDEC_MIN_H 64U
#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
-static const struct mtk_video_fmt mtk_video_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_MT21C,
- .type = MTK_FMT_FRAME,
- .num_planes = 2,
- },
-};
-
-static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
-};
-
-#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
-#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
-
-static const struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+static const struct mtk_video_fmt *
+mtk_vdec_find_format(struct v4l2_format *f,
+ const struct mtk_vcodec_dec_pdata *dec_pdata)
{
const struct mtk_video_fmt *fmt;
unsigned int k;
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
+ for (k = 0; k < dec_pdata->num_formats; k++) {
+ fmt = &dec_pdata->vdec_formats[k];
if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
return fmt;
}
@@ -94,408 +44,17 @@ static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
-/*
- * This function tries to clean all display buffers, the buffers will return
- * in display order.
- * Note the buffers returned from codec driver may still be in driver's
- * reference list.
- */
-static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vdec_fb *disp_frame_buffer = NULL;
- struct mtk_video_dec_buf *dstbuf;
- struct vb2_v4l2_buffer *vb;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- if (vdec_if_get_param(ctx,
- GET_PARAM_DISP_FRAME_BUFFER,
- &disp_frame_buffer)) {
- mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
- ctx->id);
- return NULL;
- }
-
- if (disp_frame_buffer == NULL) {
- mtk_v4l2_debug(3, "No display frame buffer");
- return NULL;
- }
-
- dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- vb2_set_plane_payload(&vb->vb2_buf, 0,
- ctx->picinfo.fb_sz[0]);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&vb->vb2_buf, 1,
- ctx->picinfo.fb_sz[1]);
-
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to done_list %d",
- ctx->id, disp_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
-
- v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
- ctx->decoded_frame_cnt++;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-/*
- * This function tries to clean all capture buffers that are not used as
- * reference buffers by codec driver any more
- * In this case, we need re-queue buffer to vb2 buffer if user space
- * already returns this buffer to v4l2 or this buffer is just the output of
- * previous sps/pps/resolution change decode, or do nothing if user
- * space still owns this buffer
- */
-static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct mtk_video_dec_buf *dstbuf;
- struct vdec_fb *free_frame_buffer = NULL;
- struct vb2_v4l2_buffer *vb;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_FREE_FRAME_BUFFER,
- &free_frame_buffer)) {
- mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
- return NULL;
- }
- if (free_frame_buffer == NULL) {
- mtk_v4l2_debug(3, " No free frame buffer");
- return NULL;
- }
-
- mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
- ctx->id, free_frame_buffer);
-
- dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
-
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- if ((dstbuf->queued_in_vb2) &&
- (dstbuf->queued_in_v4l2) &&
- (free_frame_buffer->status == FB_ST_FREE)) {
- /*
- * After decode sps/pps or non-display buffer, we don't
- * need to return capture buffer to user space, but
- * just re-queue this capture buffer to vb2 queue.
- * This reduce overheads that dq/q unused capture
- * buffer. In this case, queued_in_vb2 = true.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
- /*
- * If buffer in v4l2 driver but not in vb2 queue yet,
- * and we get this buffer from free_list, it means
- * that codec driver do not use this buffer as
- * reference buffer anymore. We should q buffer to vb2
- * queue, so later work thread could get this buffer
- * for decode. In this case, queued_in_vb2 = false
- * means this buffer is not from previous decode
- * output.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- dstbuf->queued_in_vb2 = true;
- } else {
- /*
- * Codec driver do not need to reference this capture
- * buffer and this buffer is not in v4l2 driver.
- * Then we don't need to do any thing, just add log when
- * we need to debug buffer flow.
- * When this buffer q from user space, it could
- * directly q to vb2 buffer
- */
- mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2,
- dstbuf->queued_in_v4l2);
- }
- dstbuf->used = false;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_display_buffer(ctx);
- } while (framptr);
-}
-
-static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_free_buffer(ctx);
- } while (framptr);
-}
-
-static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
-{
- static const struct v4l2_event ev_src_ch = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes =
- V4L2_EVENT_SRC_CH_RESOLUTION,
- };
-
- mtk_v4l2_debug(1, "[%d]", ctx->id);
- v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
-}
-
-static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
-{
- bool res_chg;
- int ret = 0;
-
- ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
- if (ret)
- mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
-
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-}
-
-static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
- unsigned int pixelformat)
-{
- const struct mtk_video_fmt *fmt;
- struct mtk_q_data *dst_q_data;
- unsigned int k;
-
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
- if (fmt->fourcc == pixelformat) {
- mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt->fourcc, pixelformat);
- dst_q_data->fmt = fmt;
- return;
- }
- }
-
- mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
-}
-
-static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
-{
- unsigned int dpbsize = 0;
- int ret;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_PIC_INFO,
- &ctx->last_decoded_picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.pic_w == 0 ||
- ctx->last_decoded_picinfo.pic_h == 0 ||
- ctx->last_decoded_picinfo.buf_w == 0 ||
- ctx->last_decoded_picinfo.buf_h == 0) {
- mtk_v4l2_err("Cannot get correct pic info");
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
- ctx->picinfo.cap_fourcc != 0)
- mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
-
- if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
- (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
- return 0;
-
- mtk_v4l2_debug(1,
- "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
- ctx->id, ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->last_decoded_picinfo.buf_w,
- ctx->last_decoded_picinfo.buf_h);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
-
- ctx->dpb_size = dpbsize;
-
- return ret;
-}
-
-static void mtk_vdec_worker(struct work_struct *work)
-{
- struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
- decode_work);
- struct mtk_vcodec_dev *dev = ctx->dev;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct mtk_vcodec_mem buf;
- struct vdec_fb *pfb;
- bool res_chg = false;
- int ret;
- struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (src_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
- return;
- }
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- if (dst_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
- return;
- }
-
- src_buf_info = container_of(src_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
- dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
-
- pfb = &dst_buf_info->frame_buffer;
- pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
- pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- pfb->base_y.size = ctx->picinfo.fb_sz[0];
-
- pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
- pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
- pfb->base_c.size = ctx->picinfo.fb_sz[1];
- pfb->status = 0;
- mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
-
- mtk_v4l2_debug(3,
- "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
- dst_buf->vb2_buf.index, pfb,
- pfb->base_y.va, &pfb->base_y.dma_addr,
- &pfb->base_c.dma_addr, pfb->base_y.size);
-
- if (src_buf_info->lastframe) {
- mtk_v4l2_debug(1, "Got empty flush input buffer.");
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-
- /* update dst buf status */
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- mutex_lock(&ctx->lock);
- dst_buf_info->used = false;
- mutex_unlock(&ctx->lock);
-
- vdec_if_decode(ctx, NULL, NULL, &res_chg);
- clean_display_buffer(ctx);
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
- dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
- clean_free_buffer(ctx);
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- return;
- }
- buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- if (!buf.va) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
- ctx->id, src_buf->vb2_buf.index);
- return;
- }
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
- dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
- dst_buf->timecode = src_buf->timecode;
- mutex_lock(&ctx->lock);
- dst_buf_info->used = true;
- mutex_unlock(&ctx->lock);
- src_buf_info->used = true;
-
- ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
-
- if (ret) {
- mtk_v4l2_err(
- " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
- ctx->id,
- src_buf->vb2_buf.index,
- buf.size,
- src_buf->vb2_buf.timestamp,
- dst_buf->vb2_buf.index,
- ret, res_chg);
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mutex_lock(&ctx->lock);
- src_buf_info->error = true;
- mutex_unlock(&ctx->lock);
- }
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else if (!res_chg) {
- /*
- * we only return src buffer with VB2_BUF_STATE_DONE
- * when decode success without resolution change
- */
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
-
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-
- if (!ret && res_chg) {
- mtk_vdec_pic_info_update(ctx);
- /*
- * On encountering a resolution change in the stream.
- * The driver must first process and decode all
- * remaining buffers from before the resolution change
- * point, so call flush decode here
- */
- mtk_vdec_flush_decoder(ctx);
- /*
- * After all buffers containing decoded frames from
- * before the resolution change point ready to be
- * dequeued on the CAPTURE queue, the driver sends a
- * V4L2_EVENT_SOURCE_CHANGE event for source change
- * type V4L2_EVENT_SRC_CH_RESOLUTION
- */
- mtk_vdec_queue_res_chg_event(ctx);
- }
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
-}
-
static int vidioc_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- switch (cmd->cmd) {
- case V4L2_DEC_CMD_STOP:
- case V4L2_DEC_CMD_START:
- if (cmd->flags != 0) {
- mtk_v4l2_err("cmd->flags=%u", cmd->flags);
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv,
+ cmd);
+ else
+ return v4l2_m2m_ioctl_try_decoder_cmd(file, priv, cmd);
}
@@ -510,6 +69,10 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
if (ret)
return ret;
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_decoder_cmd(file, priv, cmd);
+
mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -525,8 +88,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
return 0;
}
- v4l2_m2m_buf_queue(ctx->m2m_ctx,
- &ctx->empty_flush_buf->m2m_buf.vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
v4l2_m2m_try_schedule(ctx->m2m_ctx);
break;
@@ -561,10 +123,12 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
- INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ INIT_WORK(&ctx->decode_work, ctx->dev->vdec_pdata->worker);
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
@@ -574,7 +138,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
memset(q_data, 0, sizeof(struct mtk_q_data));
q_data->visible_width = DFT_CFG_WIDTH;
q_data->visible_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_out_fmt;
q_data->field = V4L2_FIELD_NONE;
q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
@@ -586,7 +150,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
q_data->visible_height = DFT_CFG_HEIGHT;
q_data->coded_width = DFT_CFG_WIDTH;
q_data->coded_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_cap_fmt;
q_data->field = V4L2_FIELD_NONE;
v4l_bound_align_image(&q_data->coded_width,
@@ -660,19 +224,17 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->width =
+ clamp(pix_fmt_mp->width, MTK_VDEC_MIN_W, MTK_VDEC_MAX_W);
+ pix_fmt_mp->height =
+ clamp(pix_fmt_mp->height, MTK_VDEC_MIN_H, MTK_VDEC_MAX_H);
+
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pix_fmt_mp->num_planes = 1;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
int tmp_w, tmp_h;
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- MTK_VDEC_MIN_H,
- MTK_VDEC_MAX_H);
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- MTK_VDEC_MIN_W,
- MTK_VDEC_MAX_W);
-
/*
* Find next closer width align 64, height align 64, size align
* 64 rectangle
@@ -722,11 +284,14 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
return vidioc_try_fmt(f, fmt);
@@ -737,11 +302,14 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_SRC].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
@@ -831,6 +399,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret = 0;
const struct mtk_video_fmt *fmt;
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
mtk_v4l2_debug(3, "[%d]", ctx->id);
@@ -843,7 +412,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
* Setting OUTPUT format after OUTPUT buffers are allocated is invalid
* if using the stateful API.
*/
- if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ if (!dec_pdata->uses_stateless_api &&
+ f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
@@ -859,16 +429,16 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ret = -EBUSY;
}
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (fmt == NULL) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_out_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_cap_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
}
if (fmt == NULL)
@@ -886,6 +456,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ctx->quantization = pix_mp->quantization;
ctx->xfer_func = pix_mp->xfer_func;
+ ctx->current_codec = fmt->fourcc;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
if (ret) {
@@ -897,6 +468,48 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
}
}
+ /*
+ * If using the stateless API, S_FMT should have the effect of setting
+ * the CAPTURE queue resolution no matter which queue it was called on.
+ */
+ if (dec_pdata->uses_stateless_api) {
+ ctx->picinfo.pic_w = pix_mp->width;
+ ctx->picinfo.pic_h = pix_mp->height;
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
+ if (ret) {
+ mtk_v4l2_err("[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail",
+ ctx->id);
+ return -EINVAL;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0] +
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ } else {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] =
+ ctx->picinfo.buf_w;
+ }
+
+ ctx->q_data[MTK_Q_DATA_DST].coded_width = ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].coded_height = ctx->picinfo.buf_h;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() num_plane = %d wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, pix_mp->num_planes, ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+ }
return 0;
}
@@ -905,16 +518,17 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
{
int i = 0;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
return -EINVAL;
- for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
- if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ for (i = 0; i < dec_pdata->num_framesizes; ++i) {
+ if (fsize->pixel_format != dec_pdata->vdec_framesizes[i].fourcc)
continue;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ fsize->stepwise = dec_pdata->vdec_framesizes[i].stepwise;
if (!(ctx->dev->dec_capability &
VCODEC_CAPABILITY_4K_DISABLED)) {
mtk_v4l2_debug(3, "4K is enabled");
@@ -937,16 +551,20 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
+ bool output_queue)
{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
- for (i = 0; i < NUM_FORMATS; i++) {
- if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ for (i = 0; i < dec_pdata->num_formats; i++) {
+ if (output_queue &&
+ dec_pdata->vdec_formats[i].type != MTK_FMT_DEC)
continue;
if (!output_queue &&
- (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
continue;
if (j == f->index)
@@ -954,10 +572,10 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
++j;
}
- if (i == NUM_FORMATS)
+ if (i == dec_pdata->num_formats)
return -EINVAL;
- fmt = &mtk_video_formats[i];
+ fmt = &dec_pdata->vdec_formats[i];
f->pixelformat = fmt->fourcc;
f->flags = fmt->flags;
@@ -967,13 +585,13 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, false);
+ return vidioc_enum_fmt(f, priv, false);
}
static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, true);
+ return vidioc_enum_fmt(f, priv, true);
}
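
vidioc_enum_fmt walks the same per-chip table, returning MTK_FMT_DEC entries for the OUTPUT queue and MTK_FMT_FRAME entries for the CAPTURE queue. A user-space sketch listing both sides, assuming /dev/video0 is the decoder node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void list_formats(int fd, unsigned int type)
{
        struct v4l2_fmtdesc desc = { .type = type };

        while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {
                printf("type %u: %.4s flags=0x%x\n", type,
                       (const char *)&desc.pixelformat, desc.flags);
                desc.index++;
        }
}

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;
        list_formats(fd, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);    /* bitstream */
        list_formats(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);   /* raw frames */
        return 0;
}
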
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
@@ -1064,11 +682,9 @@ static int vidioc_vdec_g_fmt(struct file *file, void *priv,
return 0;
}
-static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers,
- unsigned int *nplanes,
- unsigned int sizes[],
- struct device *alloc_devs[])
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
struct mtk_q_data *q_data;
@@ -1088,7 +704,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
}
} else {
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- *nplanes = 2;
+ *nplanes = q_data->fmt->num_planes;
else
*nplanes = 1;
@@ -1104,7 +720,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_q_data *q_data;
@@ -1126,128 +742,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *src_buf;
- struct mtk_vcodec_mem src_mem;
- bool res_chg = false;
- int ret = 0;
- unsigned int dpbsize = 1, i = 0;
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
- struct mtk_video_dec_buf *buf = NULL;
- struct mtk_q_data *dst_q_data;
-
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
- ctx->id, vb->vb2_queue->type,
- vb->index, vb);
- /*
- * check if this buffer is ready to be used after decode
- */
- if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- vb2_v4l2 = to_vb2_v4l2_buffer(vb);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
- m2m_buf.vb);
- mutex_lock(&ctx->lock);
- if (!buf->used) {
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
- buf->queued_in_vb2 = true;
- buf->queued_in_v4l2 = true;
- } else {
- buf->queued_in_vb2 = false;
- buf->queued_in_v4l2 = true;
- }
- mutex_unlock(&ctx->lock);
- return;
- }
-
- v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
-
- if (ctx->state != MTK_STATE_INIT) {
- mtk_v4l2_debug(3, "[%d] already init driver %d",
- ctx->id, ctx->state);
- return;
- }
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (!src_buf) {
- mtk_v4l2_err("No src buffer");
- return;
- }
- buf = container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (buf->lastframe) {
- /* This shouldn't happen. Just in case. */
- mtk_v4l2_err("Invalid flush buffer.");
- v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- return;
- }
-
- src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- mtk_v4l2_debug(2,
- "[%d] buf id=%d va=%p dma=%pad size=%zx",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.va, &src_mem.dma_addr,
- src_mem.size);
-
- ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
- if (ret || !res_chg) {
- /*
- * fb == NULL means to parse SPS/PPS header or
- * resolution info in src_mem. Decode can fail
- * if there is no SPS header or picture info
- * in bs
- */
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
- ctx->id);
- ctx->state = MTK_STATE_ABORT;
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else {
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
- mtk_v4l2_debug(ret ? 0 : 1,
- "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.size, ret, res_chg);
- return;
- }
-
- if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return;
- }
-
- ctx->last_decoded_picinfo = ctx->picinfo;
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
- dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
- dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
- }
-
- mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
- ctx->id,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- dst_q_data->sizeimage[0],
- dst_q_data->sizeimage[1]);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
-
- ctx->dpb_size = dpbsize;
- ctx->state = MTK_STATE_HEADER;
- mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
-
- mtk_vdec_queue_res_chg_event(ctx);
-}
-
-static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
@@ -1270,7 +765,7 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
}
}
-static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
struct vb2_v4l2_buffer, vb2_buf);
@@ -1280,14 +775,12 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->used = false;
buf->queued_in_v4l2 = false;
- } else {
- buf->lastframe = false;
}
return 0;
}
-static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
@@ -1297,21 +790,25 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
}
-static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
{
struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
- struct mtk_video_dec_buf *buf_info = container_of(
- src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (!buf_info->lastframe)
+ if (src_buf != &ctx->empty_flush_buf.vb) {
+ struct media_request *req =
+ src_buf->vb2_buf.req_obj.req;
v4l2_m2m_buf_done(src_buf,
VB2_BUF_STATE_ERROR);
+ if (req)
+ v4l2_ctrl_request_complete(req, &ctx->ctrl_hdl);
+ }
}
return;
}
@@ -1334,7 +831,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
ctx->last_decoded_picinfo.buf_w,
ctx->last_decoded_picinfo.buf_h);
- mtk_vdec_flush_decoder(ctx);
+ ret = ctx->dev->vdec_pdata->flush_decoder(ctx);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
}
ctx->state = MTK_STATE_FLUSH;
@@ -1381,75 +880,12 @@ static void m2mops_vdec_job_abort(void *priv)
ctx->state = MTK_STATE_ABORT;
}
-static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- if (ctx->state >= MTK_STATE_HEADER) {
- ctrl->val = ctx->dpb_size;
- } else {
- mtk_v4l2_debug(0, "Seqinfo not ready");
- ctrl->val = 0;
- }
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
- .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
-};
-
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
-{
- struct v4l2_ctrl *ctrl;
-
- v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
-
- ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
- 0, 32, 1, 1);
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
- V4L2_MPEG_VIDEO_VP9_PROFILE_0,
- 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
-
- if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("Adding control failed %d",
- ctx->ctrl_hdl.error);
- return ctx->ctrl_hdl.error;
- }
-
- v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
- return 0;
-}
-
const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
.device_run = m2mops_vdec_device_run,
.job_ready = m2mops_vdec_job_ready,
.job_abort = m2mops_vdec_job_abort,
};
-static const struct vb2_ops mtk_vdec_vb2_ops = {
- .queue_setup = vb2ops_vdec_queue_setup,
- .buf_prepare = vb2ops_vdec_buf_prepare,
- .buf_queue = vb2ops_vdec_buf_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .buf_init = vb2ops_vdec_buf_init,
- .buf_finish = vb2ops_vdec_buf_finish,
- .start_streaming = vb2ops_vdec_start_streaming,
- .stop_streaming = vb2ops_vdec_stop_streaming,
-};
-
const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -1496,7 +932,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
@@ -1511,7 +947,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index cf26b6c1486a..46783516b84a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -16,6 +16,8 @@
#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
#define MTK_VDEC_MAX_W 2048U
#define MTK_VDEC_MAX_H 1088U
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
@@ -40,9 +42,9 @@ struct vdec_fb {
* @queued_in_vb2: Capture buffer is queue in vb2
* @queued_in_v4l2: Capture buffer is in v4l2 driver, but not in vb2
* queue yet
- * @lastframe: Intput buffer is last buffer - EOS
* @error: An unrecoverable error occurs on this buffer.
* @frame_buffer: Decode status, and buffer information of Capture buffer
+ * @bs_buffer: Output buffer info
*
* Note : These status information help us track and debug buffer state
*/
@@ -52,13 +54,19 @@ struct mtk_video_dec_buf {
bool used;
bool queued_in_vb2;
bool queued_in_v4l2;
- bool lastframe;
bool error;
- struct vdec_fb frame_buffer;
+
+ union {
+ struct vdec_fb frame_buffer;
+ struct mtk_vcodec_mem bs_buffer;
+ };
};
extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+extern const struct media_device_ops mtk_vcodec_media_ops;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata;
/*
@@ -73,7 +81,18 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+/*
+ * VB2 ops
+ */
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[]);
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb);
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb);
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb);
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count);
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q);
#endif /* _MTK_VCODEC_DEC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index f87dc47d9e63..e6e6a8203eeb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -14,6 +14,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
@@ -81,21 +82,14 @@ static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
- struct mtk_video_dec_buf *mtk_buf = NULL;
int ret = 0;
struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- mtk_buf = kzalloc(sizeof(*mtk_buf), GFP_KERNEL);
- if (!mtk_buf) {
- kfree(ctx);
- return -ENOMEM;
- }
mutex_lock(&dev->dev_mutex);
- ctx->empty_flush_buf = mtk_buf;
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
@@ -106,7 +100,7 @@ static int fops_vcodec_open(struct file *file)
mutex_init(&ctx->lock);
ctx->type = MTK_INST_DECODER;
- ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ ret = dev->vdec_pdata->ctrls_setup(ctx);
if (ret) {
mtk_v4l2_err("Failed to setup mt vcodec controls");
goto err_ctrls_setup;
@@ -121,8 +115,7 @@ static int fops_vcodec_open(struct file *file)
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- ctx->empty_flush_buf->m2m_buf.vb.vb2_buf.vb2_queue = src_vq;
- ctx->empty_flush_buf->lastframe = true;
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -162,7 +155,6 @@ err_m2m_ctx_init:
err_ctrls_setup:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -193,7 +185,6 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
list_del_init(&ctx->list);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -224,6 +215,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
+ dev->vdec_pdata = of_device_get_match_data(&pdev->dev);
if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
&rproc_phandle)) {
fw_type = VPU;
@@ -325,18 +317,47 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
+ if (dev->vdec_pdata->uses_stateless_api) {
+ dev->mdev_dec.dev = &pdev->dev;
+ strscpy(dev->mdev_dec.model, MTK_VCODEC_DEC_NAME,
+ sizeof(dev->mdev_dec.model));
+
+ media_device_init(&dev->mdev_dec);
+ dev->mdev_dec.ops = &mtk_vcodec_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev_dec;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev_dec, dev->vfd_dec,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media controller");
+ goto err_reg_cont;
+ }
+
+ ret = media_device_register(&dev->mdev_dec);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media device");
+ goto err_media_reg;
+ }
+
+ mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
+ }
ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_dec_reg;
}
- mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
- vfd_dec->num);
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
err_dec_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ media_device_unregister(&dev->mdev_dec);
+err_media_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+err_reg_cont:
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
@@ -352,7 +373,14 @@ err_dec_pm:
}
static const struct of_device_id mtk_vcodec_match[] = {
- {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {
+ .compatible = "mediatek,mt8173-vcodec-dec",
+ .data = &mtk_vdec_8173_pdata,
+ },
+ {
+ .compatible = "mediatek,mt8183-vcodec-dec",
+ .data = &mtk_vdec_8183_pdata,
+ },
{},
};
@@ -364,6 +392,13 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
flush_workqueue(dev->decode_workqueue);
destroy_workqueue(dev->decode_workqueue);
+
+ if (media_devnode_is_registered(dev->mdev_dec.devnode)) {
+ media_device_unregister(&dev->mdev_dec);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+ media_device_cleanup(&dev->mdev_dec);
+ }
+
if (dev->m2m_dev_dec)
v4l2_m2m_release(dev->m2m_dev_dec);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
new file mode 100644
index 000000000000..bef49244e61b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 3
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+/*
+ * This function tries to clean all display buffers; the buffers are returned
+ * in display order.
+ * Note that the buffers returned by the codec driver may still be on the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+ struct vb2_v4l2_buffer *vb;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER", ctx->id);
+ return NULL;
+ }
+
+ if (!disp_frame_buffer) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&vb->vb2_buf, 0, ctx->picinfo.fb_sz[0]);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&vb->vb2_buf, 1,
+ ctx->picinfo.fb_sz[1]);
+
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that are no longer used
+ * as reference buffers by the codec driver.
+ * In that case we need to re-queue the buffer to vb2 if user space has
+ * already returned it to v4l2, or if the buffer is just the output of a
+ * previous sps/pps/resolution change decode; do nothing if user space
+ * still owns the buffer.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+ struct vb2_v4l2_buffer *vb;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (!free_frame_buffer) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p", ctx->id,
+ free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if (dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2 &&
+ free_frame_buffer->status == FB_ST_FREE) {
+ /*
+ * After decoding sps/pps or a non-display buffer, we don't
+ * need to return the capture buffer to user space, but
+ * just re-queue it to the vb2 queue. This avoids the
+ * overhead of dequeuing/queuing an unused capture buffer.
+ * In this case, queued_in_vb2 = true.
+ */
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
+ /*
+ * If the buffer is in the v4l2 driver but not yet in the
+ * vb2 queue, and we get it from the free list, the codec
+ * driver no longer uses it as a reference buffer. Queue
+ * it to vb2 so the work thread can later pick it up for
+ * decoding. In this case, queued_in_vb2 = false means
+ * this buffer is not the output of a previous decode.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+ /*
+ * The codec driver no longer needs to reference this
+ * capture buffer and the buffer is not in the v4l2
+ * driver, so there is nothing to do; just log it to help
+ * debug the buffer flow. When user space queues this
+ * buffer again, it can go directly to the vb2 queue.
+ */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_display_buffer(ctx))
+ ;
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_free_buffer(ctx))
+ ;
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ return 0;
+}
+
+static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
+ unsigned int pixelformat)
+{
+ const struct mtk_video_fmt *fmt;
+ struct mtk_q_data *dst_q_data;
+ unsigned int k;
+
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == pixelformat) {
+ mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
+ dst_q_data->fmt->fourcc, pixelformat);
+ dst_q_data->fmt = fmt;
+ return;
+ }
+ }
+
+ mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
+}
+
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
+ ctx->picinfo.cap_fourcc != 0)
+ mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
+
+ if (ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h)
+ return 0;
+
+ mtk_v4l2_debug(1, "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)", ctx->id,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+
+ return ret;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!dst_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf_info =
+ container_of(dst_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.size = ctx->picinfo.fb_sz[0];
+
+ pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.size = ctx->picinfo.fb_sz[1];
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->vb2_buf.index, pfb, pfb->base_y.va,
+ &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size);
+
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ mtk_v4l2_debug(1, "Got empty flush input buffer.");
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = false;
+ mutex_unlock(&ctx->lock);
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+
+ src_buf_info =
+ container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", ctx->id,
+ src_buf->vb2_buf.index);
+ return;
+ }
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, src_buf->vb2_buf.index, buf.size,
+ src_buf->vb2_buf.timestamp, dst_buf->vb2_buf.index, ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ src_buf_info->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else if (!res_chg) {
+ /*
+ * we only return src buffer with VB2_BUF_STATE_DONE
+ * when decode success without resolution change
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+ * On encountering a resolution change in the stream,
+ * the driver must first process and decode all
+ * remaining buffers from before the resolution change
+ * point, so flush the decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+ * After all buffers containing decoded frames from
+ * before the resolution change point are ready to be
+ * dequeued on the CAPTURE queue, the driver sends a
+ * V4L2_EVENT_SOURCE_CHANGE event with source change
+ * type V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
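+
The empty flush buffer handled at the top of this worker implements the stateful drain: the next CAPTURE buffer is returned empty with V4L2_BUF_FLAG_LAST. A user-space sketch of the matching drain sequence, assuming the flush is triggered by the standard V4L2_DEC_CMD_STOP decoder command (the command handler itself is outside this hunk); error handling trimmed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int drain_decoder(int fd)
{
        struct v4l2_decoder_cmd cmd = { .cmd = V4L2_DEC_CMD_STOP };
        struct v4l2_plane planes[2];
        struct v4l2_buffer buf;

        if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd))
                return -1;

        /* Keep dequeuing CAPTURE buffers until the LAST flag shows up. */
        do {
                memset(&buf, 0, sizeof(buf));
                memset(planes, 0, sizeof(planes));
                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.m.planes = planes;
                buf.length = 2;
                if (ioctl(fd, VIDIOC_DQBUF, &buf))
                        return -1;
        } while (!(buf.flags & V4L2_BUF_FLAG_LAST));

        return 0;
}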
+
+static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret;
+ unsigned int dpbsize = 1, i;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_q_data *dst_q_data;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb->vb2_queue->type, vb->index, vb);
+ /*
+ * check if this buffer is ready to be used after decode
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct mtk_video_dec_buf *buf;
+
+ vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ mutex_lock(&ctx->lock);
+ if (!buf->used) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id,
+ ctx->state);
+ return;
+ }
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ /* This shouldn't happen. Just in case. */
+ mtk_v4l2_err("Invalid flush buffer.");
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", ctx->id,
+ src_buf->vb2_buf.index, src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+ * fb == NULL means to parse SPS/PPS header or
+ * resolution info in src_mem. Decode can fail
+ * if there is no SPS header or picture info
+ * in bs
+ */
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id);
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->vb2_buf.index, src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
+ dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
+ dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
+ }
+
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, dst_q_data->sizeimage[0], dst_q_data->sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+
+ mtk_vdec_queue_res_chg_event(ctx);
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0, 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0);
+ /*
+ * H264. Baseline / Extended decoding is not supported.
+ */
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d", ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
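+
With the menu skip mask above only the Constrained Baseline, Main and High H.264 profiles remain selectable. A user-space sketch of how a client observes that through VIDIOC_QUERYMENU, where skipped entries return an error; /dev/video0 is assumed to be the decoder node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_querymenu menu;
        unsigned int i;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;

        for (i = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
             i <= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH; i++) {
                memset(&menu, 0, sizeof(menu));
                menu.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
                menu.index = i;
                /* Baseline and Extended sit in the skip mask, so they fail here. */
                if (ioctl(fd, VIDIOC_QUERYMENU, &menu) == 0)
                        printf("supported: %s\n", (const char *)menu.name);
        }
        return 0;
}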
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+}
+
+static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+
+ .buf_queue = vb2ops_vdec_stateful_buf_queue,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata = {
+ .chip = MTK_MT8173,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_frame_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
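+
The pdata collects every per-SoC hook in one place, so supporting another chip mostly means providing new tables. Purely as a hypothetical sketch, not part of this patch, an additional SoC reusing the shared stateful ops from this file could be described alongside the 8173 pdata like this (the chip id and reused tables are placeholders):

static const struct mtk_vcodec_dec_pdata mtk_vdec_example_pdata = {
        .chip = MTK_MT8192,                     /* placeholder chip id */
        .init_vdec_params = mtk_init_vdec_params,
        .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
        .vdec_vb2_ops = &mtk_vdec_frame_vb2_ops,
        .vdec_formats = mtk_video_formats,      /* or a SoC-specific table */
        .num_formats = NUM_FORMATS,
        .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
        .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
        .vdec_framesizes = mtk_vdec_framesizes,
        .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
        .worker = mtk_vdec_worker,
        .flush_decoder = mtk_vdec_flush_decoder,
};

Such a pdata would then be wired up through the .data pointer of a new entry in the mtk_vcodec_match table.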
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
new file mode 100644
index 000000000000..8f4a1f0a0769
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/module.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+/**
+ * struct mtk_stateless_control - CID control type
+ * @cfg: control configuration
+ * @codec_type: codec type (V4L2 pixel format) for CID control type
+ */
+struct mtk_stateless_control {
+ struct v4l2_ctrl_config cfg;
+ int codec_type;
+};
+
+static const struct mtk_stateless_control mtk_stateless_controls[] = {
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ }
+};
+
+#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MM21,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 1
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+static void mtk_vdec_stateless_set_dst_payload(struct mtk_vcodec_ctx *ctx,
+ struct vdec_fb *fb)
+{
+ struct mtk_video_dec_buf *vdec_frame_buf =
+ container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ struct vb2_v4l2_buffer *vb = &vdec_frame_buf->m2m_buf.vb;
+ unsigned int cap_y_size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 0, cap_y_size);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ unsigned int cap_c_size =
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 1, cap_c_size);
+ }
+}
+
+static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx,
+ struct vb2_v4l2_buffer *vb2_v4l2)
+{
+ struct mtk_video_dec_buf *framebuf =
+ container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);
+ struct vdec_fb *pfb = &framebuf->frame_buffer;
+ struct vb2_buffer *dst_buf = &vb2_v4l2->vb2_buf;
+
+ pfb = &framebuf->frame_buffer;
+ pfb->base_y.va = NULL;
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ pfb->base_c.va = NULL;
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+ }
+ mtk_v4l2_debug(1, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx frame_count = %d",
+ dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size, ctx->decoded_frame_cnt);
+
+ return pfb;
+}
+
+static void vb2ops_vdec_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_v4l2_src, *vb2_v4l2_dst;
+ struct vb2_buffer *vb2_src;
+ struct mtk_vcodec_mem *bs_src;
+ struct mtk_video_dec_buf *dec_buf_src;
+ struct media_request *src_buf_req;
+ struct vdec_fb *dst_buf;
+ bool res_chg = false;
+ int ret;
+
+ vb2_v4l2_src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_src) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available source buffer", ctx->id);
+ return;
+ }
+
+ vb2_v4l2_dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_dst) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available destination buffer", ctx->id);
+ return;
+ }
+
+ vb2_src = &vb2_v4l2_src->vb2_buf;
+ dec_buf_src = container_of(vb2_v4l2_src, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ bs_src = &dec_buf_src->bs_buffer;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb2_src->vb2_queue->type, vb2_src->index, vb2_src);
+
+ bs_src->va = NULL;
+ bs_src->dma_addr = vb2_dma_contig_plane_dma_addr(vb2_src, 0);
+ bs_src->size = (size_t)vb2_src->planes[0].bytesused;
+
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, bs_src->va, &bs_src->dma_addr, bs_src->size, vb2_src);
+ /* Apply request controls. */
+ src_buf_req = vb2_src->req_obj.req;
+ if (src_buf_req)
+ v4l2_ctrl_request_setup(src_buf_req, &ctx->ctrl_hdl);
+ else
+ mtk_v4l2_err("vb2 buffer media request is NULL");
+
+ dst_buf = vdec_get_cap_buffer(ctx, vb2_v4l2_dst);
+ v4l2_m2m_buf_copy_metadata(vb2_v4l2_src, vb2_v4l2_dst, true);
+ ret = vdec_if_decode(ctx, bs_src, dst_buf, &res_chg);
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, vb2_src->index, bs_src->size,
+ vb2_src->timestamp, ret, res_chg);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ dec_buf_src->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ }
+
+ mtk_vdec_stateless_set_dst_payload(ctx, dst_buf);
+
+ v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx,
+ ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+}
+
+static void vb2ops_vdec_stateless_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id, vb->vb2_queue->type, vb->index, vb);
+
+ mutex_lock(&ctx->lock);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ mutex_unlock(&ctx->lock);
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return;
+
+ /* If an OUTPUT buffer, we may need to update the state */
+ if (ctx->state == MTK_STATE_INIT) {
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "Init driver from init to header.");
+ } else {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id, ctx->state);
+ }
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+
+ return vdec_if_decode(ctx, NULL, NULL, &res_chg);
+}
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, NUM_CTRLS);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_hdl.error;
+ }
+
+ for (i = 0; i < NUM_CTRLS; i++) {
+ struct v4l2_ctrl_config cfg = mtk_stateless_controls[i].cfg;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &cfg, NULL);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control %d failed %d", i, ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
+
+static int fops_media_request_validate(struct media_request *mreq)
+{
+ const unsigned int buffer_cnt = vb2_request_buffer_cnt(mreq);
+
+ switch (buffer_cnt) {
+ case 1:
+ /* We expect exactly one buffer with the request */
+ break;
+ case 0:
+ mtk_v4l2_debug(1, "No buffer provided with the request");
+ return -ENOENT;
+ default:
+ mtk_v4l2_debug(1, "Too many buffers (%d) provided with the request",
+ buffer_cnt);
+ return -EINVAL;
+ }
+
+ return vb2_request_validate(mreq);
+}
+
+const struct media_device_ops mtk_vcodec_media_ops = {
+ .req_validate = fops_media_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_queue *src_vq;
+
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ /* Support request api for output plane */
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+}
+
+static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static struct vb2_ops mtk_vdec_request_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+
+ .buf_queue = vb2ops_vdec_stateless_buf_queue,
+ .buf_out_validate = vb2ops_vdec_out_buf_validate,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .buf_request_complete = vb2ops_vdec_buf_request_complete,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata = {
+ .chip = MTK_MT8183,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_request_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .uses_stateless_api = true,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
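+
The stateless pdata depends on the Request API: fops_media_request_validate above insists on exactly one buffer per request, and the worker applies the request's controls before decoding. A user-space sketch of one decode step under those rules, assuming media_fd and video_fd are already open, the queues are streaming and the OUTPUT buffer is prepared; only the SPS control is shown, the other stateless H.264 controls follow the same pattern, and error handling is trimmed:

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>
#include <linux/videodev2.h>

static int decode_one_slice(int media_fd, int video_fd,
                            const struct v4l2_ctrl_h264_sps *sps,
                            struct v4l2_buffer *out_buf)
{
        struct v4l2_ext_control ctrl = {
                .id = V4L2_CID_STATELESS_H264_SPS,
                .size = sizeof(*sps),
                .ptr = (void *)sps,
        };
        struct v4l2_ext_controls ctrls = {
                .which = V4L2_CTRL_WHICH_REQUEST_VAL,
                .count = 1,
                .controls = &ctrl,
        };
        struct pollfd pfd;
        int req_fd;

        /* One request carries exactly one OUTPUT buffer (see req_validate). */
        if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
                return -1;

        ctrls.request_fd = req_fd;
        if (ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls))
                return -1;

        out_buf->flags |= V4L2_BUF_FLAG_REQUEST_FD;
        out_buf->request_fd = req_fd;
        if (ioctl(video_fd, VIDIOC_QBUF, out_buf))
                return -1;

        if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
                return -1;

        /* The request signals POLLPRI once the driver has completed it. */
        pfd.fd = req_fd;
        pfd.events = POLLPRI;
        return poll(&pfd, 1, -1) == 1 ? 0 : -1;
}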
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c6c7672fecfb..581522177308 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -13,6 +13,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include "mtk_vcodec_util.h"
@@ -249,7 +250,10 @@ struct vdec_pic_info {
* @decode_work: worker for the decoding
* @encode_work: worker for the encoding
* @last_decoded_picinfo: pic information get from latest decode
- * @empty_flush_buf: a fake size-0 capture buffer that indicates flush
+ * @empty_flush_buf: a fake size-0 output buffer queued to signal a flush. Only
+ * to be used with the encoder and stateful decoder.
+ * @is_flushing: set to true if flushing is in progress.
+ * @current_codec: current set input codec, in V4L2 pixel format
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
@@ -288,7 +292,10 @@ struct mtk_vcodec_ctx {
struct work_struct decode_work;
struct work_struct encode_work;
struct vdec_pic_info last_decoded_picinfo;
- struct mtk_video_dec_buf *empty_flush_buf;
+ struct v4l2_m2m_buffer empty_flush_buf;
+ bool is_flushing;
+
+ u32 current_codec;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
@@ -304,6 +311,50 @@ enum mtk_chip {
MTK_MT8173,
MTK_MT8183,
MTK_MT8192,
+ MTK_MT8195,
+};
+
+/**
+ * struct mtk_vcodec_dec_pdata - compatible data for each IC
+ * @init_vdec_params: init vdec params
+ * @ctrls_setup: init vcodec dec ctrls
+ * @worker: worker to start a decode job
+ * @flush_decoder: function that flushes the decoder
+ *
+ * @vdec_vb2_ops: struct vb2_ops
+ *
+ * @vdec_formats: supported video decoder formats
+ * @num_formats: count of video decoder formats
+ * @default_out_fmt: default output buffer format
+ * @default_cap_fmt: default capture buffer format
+ *
+ * @vdec_framesizes: supported video decoder frame sizes
+ * @num_framesizes: count of video decoder frame sizes
+ *
+ * @chip: chip this decoder is compatible with
+ *
+ * @uses_stateless_api: whether the decoder uses the stateless API with requests
+ */
+
+struct mtk_vcodec_dec_pdata {
+ void (*init_vdec_params)(struct mtk_vcodec_ctx *ctx);
+ int (*ctrls_setup)(struct mtk_vcodec_ctx *ctx);
+ void (*worker)(struct work_struct *work);
+ int (*flush_decoder)(struct mtk_vcodec_ctx *ctx);
+
+ struct vb2_ops *vdec_vb2_ops;
+
+ const struct mtk_video_fmt *vdec_formats;
+ const int num_formats;
+ const struct mtk_video_fmt *default_out_fmt;
+ const struct mtk_video_fmt *default_cap_fmt;
+
+ const struct mtk_codec_framesizes *vdec_framesizes;
+ const int num_framesizes;
+
+ enum mtk_chip chip;
+
+ bool uses_stateless_api;
};
/**
@@ -339,6 +390,7 @@ struct mtk_vcodec_enc_pdata {
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
* @vfd_dec: Video device for decoder
+ * @mdev_dec: Media device for decoder
* @vfd_enc: Video device for encoder.
*
* @m2m_dev_dec: m2m device for decoder
@@ -349,6 +401,7 @@ struct mtk_vcodec_enc_pdata {
* @curr_ctx: The context that is waiting for codec hardware
*
* @reg_base: Mapped address of MTK Vcodec registers.
+ * @vdec_pdata: decoder IC-specific data
* @venc_pdata: encoder IC-specific data
*
* @fw_handler: used to communicate with the firmware.
@@ -375,6 +428,7 @@ struct mtk_vcodec_enc_pdata {
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd_dec;
+ struct media_device mdev_dec;
struct video_device *vfd_enc;
struct v4l2_m2m_dev *m2m_dev_dec;
@@ -384,6 +438,7 @@ struct mtk_vcodec_dev {
spinlock_t irqlock;
struct mtk_vcodec_ctx *curr_ctx;
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_dec_pdata *vdec_pdata;
const struct mtk_vcodec_enc_pdata *venc_pdata;
struct mtk_vcodec_fw *fw_handler;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 416f356af363..7457451ebff0 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -672,6 +672,7 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ int ret;
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
@@ -679,7 +680,83 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
return -EIO;
}
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * Complete the flush if the user dequeued the 0-payload LAST buffer.
+ * We check the payload because a buffer with the LAST flag can also
+ * be seen during resolution changes. If we happen to be flushing at
+ * that time, the last buffer before the resolution change could be
+ * mistaken for the buffer generated by the flush, terminating the
+ * flush earlier than we want.
+ */
+ if (!V4L2_TYPE_IS_OUTPUT(buf->type) &&
+ buf->flags & V4L2_BUF_FLAG_LAST &&
+ buf->m.planes[0].bytesused == 0 &&
+ ctx->is_flushing) {
+ /*
+ * Last CAPTURE buffer is dequeued, we can allow another flush
+ * to take place.
+ */
+ ctx->is_flushing = false;
+ }
+
+ return 0;
+}
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq, *dst_vq;
+ int ret;
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call to CMD after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, cmd);
+ if (ret)
+ return ret;
+
+ /* Calling START or STOP is invalid if a flush is in progress */
+ if (ctx->is_flushing)
+ return -EBUSY;
+
+ mtk_v4l2_debug(1, "encoder cmd=%u", cmd->cmd);
+
+ dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!vb2_is_streaming(src_vq)) {
+ mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ return 0;
+ }
+ if (!vb2_is_streaming(dst_vq)) {
+ mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ return 0;
+ }
+ ctx->is_flushing = true;
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ break;
+
+ case V4L2_ENC_CMD_START:
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
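
vidioc_encoder_cmd queues the empty flush buffer on V4L2_ENC_CMD_STOP and vidioc_venc_dqbuf clears is_flushing once the zero-payload LAST buffer is dequeued. A user-space sketch of the matching drain sequence, assuming fd is the encoder node with MMAP multiplanar queues:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int drain_encoder(int fd)
{
        struct v4l2_encoder_cmd cmd = { .cmd = V4L2_ENC_CMD_STOP };
        struct v4l2_plane plane;
        struct v4l2_buffer buf;

        if (ioctl(fd, VIDIOC_ENCODER_CMD, &cmd))
                return -1;

        /* Dequeue CAPTURE buffers until the empty LAST buffer is seen. */
        do {
                memset(&buf, 0, sizeof(buf));
                memset(&plane, 0, sizeof(plane));
                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.m.planes = &plane;
                buf.length = 1;
                if (ioctl(fd, VIDIOC_DQBUF, &buf))
                        return -1;
        } while (!(buf.flags & V4L2_BUF_FLAG_LAST) || plane.bytesused);

        /* V4L2_ENC_CMD_START resumes encoding after the drain. */
        cmd.cmd = V4L2_ENC_CMD_START;
        return ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);
}
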
const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
@@ -715,6 +792,9 @@ const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
.vidioc_g_selection = vidioc_venc_g_selection,
.vidioc_s_selection = vidioc_venc_s_selection,
+
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
};
static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
@@ -793,7 +873,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
struct venc_enc_param param;
- int ret;
+ int ret, pm_ret;
int i;
/* Once state turn into MTK_STATE_ABORT, we need stop_streaming
@@ -845,9 +925,9 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err_set_param:
- ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
- if (ret < 0)
- mtk_v4l2_err("pm_runtime_put fail %d", ret);
+ pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
+ if (pm_ret < 0)
+ mtk_v4l2_err("pm_runtime_put fail %d", pm_ret);
err_start_stream:
for (i = 0; i < q->num_buffers; ++i) {
@@ -882,9 +962,38 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
dst_buf->vb2_buf.planes[0].bytesused = 0;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
+ /* STREAMOFF on the CAPTURE queue completes any ongoing flush */
+ if (ctx->is_flushing) {
+ struct v4l2_m2m_buffer *b, *n;
+
+ mtk_v4l2_debug(1, "STREAMOFF called while flushing");
+ /*
+ * STREAMOFF could be called before the flush buffer is
+ * dequeued. Check whether empty flush buf is still in
+ * queue before removing it.
+ */
+ v4l2_m2m_for_each_src_buf_safe(ctx->m2m_ctx, b, n) {
+ if (b == &ctx->empty_flush_buf) {
+ v4l2_m2m_src_buf_remove_by_buf(ctx->m2m_ctx, &b->vb);
+ break;
+ }
+ }
+ ctx->is_flushing = false;
+ }
} else {
- while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ if (src_buf != &ctx->empty_flush_buf.vb)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
+ if (ctx->is_flushing) {
+ /*
+ * If we are in the middle of a flush, put the flush
+ * buffer back into the queue so the next CAPTURE
+ * buffer gets returned with the LAST flag set.
+ */
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ &ctx->empty_flush_buf.vb);
+ }
}
if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
@@ -984,12 +1093,15 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
{
struct venc_enc_param enc_prm;
struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- struct mtk_video_enc_buf *mtk_buf =
- container_of(vb2_v4l2, struct mtk_video_enc_buf,
- m2m_buf.vb);
-
+ struct mtk_video_enc_buf *mtk_buf;
int ret = 0;
+ /* Don't upcast the empty flush buffer */
+ if (vb2_v4l2 == &ctx->empty_flush_buf.vb)
+ return 0;
+
+ mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, m2m_buf.vb);
+
memset(&enc_prm, 0, sizeof(enc_prm));
if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
return 0;
@@ -1075,6 +1187,20 @@ static void mtk_venc_worker(struct work_struct *work)
}
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /*
+ * If we see the flush buffer, send an empty buffer with the LAST flag
+ * to the client. is_flushing will be reset at the time the buffer
+ * is dequeued.
+ */
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+ return;
+ }
+
memset(&frm_buf, 0, sizeof(frm_buf));
for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
frm_buf.fb_addr[i].dma_addr =
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 45d1870c83dd..eed67394cf46 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -26,7 +26,7 @@
module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
-static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
+static const struct mtk_video_fmt mtk_video_formats_output[] = {
{
.fourcc = V4L2_PIX_FMT_NV12M,
.type = MTK_FMT_FRAME,
@@ -49,7 +49,7 @@ static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_h264[] = {
{
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_ENC,
@@ -57,7 +57,7 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_vp8[] = {
{
.fourcc = V4L2_PIX_FMT_VP8,
.type = MTK_FMT_ENC,
@@ -65,14 +65,6 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8183[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_ENC,
- .num_planes = 1,
- },
-};
-
/* Wake up context wait_queue */
static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
{
@@ -131,6 +123,7 @@ static int fops_vcodec_open(struct file *file)
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
int ret = 0;
+ struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -157,13 +150,16 @@ static int fops_vcodec_open(struct file *file)
goto err_ctrls_setup;
}
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
- &mtk_vcodec_enc_queue_init);
+ &mtk_vcodec_enc_queue_init);
if (IS_ERR((__force void *)ctx->m2m_ctx)) {
ret = PTR_ERR((__force void *)ctx->m2m_ctx);
mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
ret);
goto err_m2m_ctx_init;
}
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_enc_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -392,34 +388,33 @@ err_enc_pm:
static const struct mtk_vcodec_enc_pdata mt8173_avc_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_avc,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_avc),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
- .min_bitrate = 1,
- .max_bitrate = 4000000,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 60000000,
.core_id = VENC_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8173_vp8_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_vp8,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_vp8),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_vp8,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_vp8),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
- .max_bitrate = 4000000,
+ .max_bitrate = 9000000,
.core_id = VENC_LT_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.chip = MTK_MT8183,
.uses_ext = true,
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8183 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 40000000,
.core_id = VENC_SYS,
@@ -428,16 +423,27 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
.chip = MTK_MT8192,
.uses_ext = true,
- /* MT8192 supports the same capture formats as MT8183 */
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8192 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 100000000,
.core_id = VENC_SYS,
};
+
+static const struct mtk_vcodec_enc_pdata mt8195_pdata = {
+ .chip = MTK_MT8195,
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 100000000,
+ .core_id = VENC_SYS,
+};
+
static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc",
.data = &mt8173_avc_pdata},
@@ -445,6 +451,7 @@ static const struct of_device_id mtk_vcodec_enc_match[] = {
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
{.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
+ {.compatible = "mediatek,mt8195-vcodec-enc", .data = &mt8195_pdata},
{},
};
MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
new file mode 100644
index 000000000000..946c23088308
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-h264.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_drv_if.h"
+#include "../vdec_vpu_if.h"
+
+#define BUF_PREDICTION_SZ (64 * 4096)
+#define MB_UNIT_LEN 16
+
+/* get used parameters for sps/pps */
+#define GET_MTK_VDEC_FLAG(cond, flag) \
+ { dst_param->cond = ((src_param->flags & (flag)) ? (1) : (0)); }
+#define GET_MTK_VDEC_PARAM(param) \
+ { dst_param->param = src_param->param; }
+/* motion vector size (bytes) for every macroblock */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define H264_MAX_MV_NUM 32
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct mtk_h264_dpb_info - h264 dpb information
+ * @y_dma_addr: Y bitstream physical address
+ * @c_dma_addr: CbCr bitstream physical address
+ * @reference_flag: reference picture flag (short/long term reference picture)
+ * @field: field picture flag
+ */
+struct mtk_h264_dpb_info {
+ dma_addr_t y_dma_addr;
+ dma_addr_t c_dma_addr;
+ int reference_flag;
+ int field;
+};
+
+/*
+ * struct mtk_h264_sps_param - parameters for sps
+ */
+struct mtk_h264_sps_param {
+ unsigned char chroma_format_idc;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char log2_max_frame_num_minus4;
+ unsigned char pic_order_cnt_type;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char max_num_ref_frames;
+ unsigned char separate_colour_plane_flag;
+ unsigned short pic_width_in_mbs_minus1;
+ unsigned short pic_height_in_map_units_minus1;
+ unsigned int max_frame_nums;
+ unsigned char qpprime_y_zero_transform_bypass_flag;
+ unsigned char delta_pic_order_always_zero_flag;
+ unsigned char frame_mbs_only_flag;
+ unsigned char mb_adaptive_frame_field_flag;
+ unsigned char direct_8x8_inference_flag;
+ unsigned char reserved[3];
+};
+
+/*
+ * struct mtk_h264_pps_param - parameters for pps
+ */
+struct mtk_h264_pps_param {
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ unsigned char weighted_bipred_idc;
+ char pic_init_qp_minus26;
+ char chroma_qp_index_offset;
+ char second_chroma_qp_index_offset;
+ unsigned char entropy_coding_mode_flag;
+ unsigned char pic_order_present_flag;
+ unsigned char deblocking_filter_control_present_flag;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char redundant_pic_cnt_present_flag;
+ unsigned char transform_8x8_mode_flag;
+ unsigned char scaling_matrix_present_flag;
+ unsigned char reserved[2];
+};
+
+struct slice_api_h264_scaling_matrix {
+ unsigned char scaling_list_4x4[6][16];
+ unsigned char scaling_list_8x8[6][64];
+};
+
+struct slice_h264_dpb_entry {
+ unsigned long long reference_ts;
+ unsigned short frame_num;
+ unsigned short pic_num;
+ /* Note that field is indicated by v4l2_buffer.field */
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+/*
+ * struct slice_api_h264_decode_param - parameters for decode.
+ */
+struct slice_api_h264_decode_param {
+ struct slice_h264_dpb_entry dpb[16];
+ unsigned short num_slices;
+ unsigned short nal_ref_idc;
+ unsigned char ref_pic_list_p0[32];
+ unsigned char ref_pic_list_b0[32];
+ unsigned char ref_pic_list_b1[32];
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */
+};
+
+/*
+ * struct mtk_h264_dec_slice_param - parameters for decode current frame
+ */
+struct mtk_h264_dec_slice_param {
+ struct mtk_h264_sps_param sps;
+ struct mtk_h264_pps_param pps;
+ struct slice_api_h264_scaling_matrix scaling_matrix;
+ struct slice_api_h264_decode_param decode_params;
+ struct mtk_h264_dpb_info h264_dpb_info[16];
+};
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : padding for 8-byte alignment
+ */
+struct h264_fb {
+ u64 vdec_fb_va;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ s32 poc;
+ u32 reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : whether a resolution change has happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @cap_num_planes : number of planes of the capture buffer
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ u32 dpb_sz;
+ u32 resolution_changed;
+ u32 realloc_mv_buf;
+ u32 cap_num_planes;
+ u64 bs_dma;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ u64 vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by the VPU and then mapped to the Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ * @h264_slice_params : the parameters that the hardware uses to decode
+ */
+struct vdec_h264_vsi {
+ u64 pred_buf_dma;
+ u64 mv_buf_dma[H264_MAX_MV_NUM];
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ struct mtk_h264_dec_slice_param h264_slice_params;
+};
+
+/**
+ * struct vdec_h264_slice_inst - h264 decoder instance
+ * @num_nalu : number of NAL units decoded so far
+ * @ctx : pointer to mtk_vcodec_ctx
+ * @pred_buf : HW working prediction buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi_ctx : Local VSI data for this decoding context
+ * @h264_slice_param : the parameters that the hardware uses to decode
+ * @dpb : decoded picture buffer used to store reference buffer information
+ */
+struct vdec_h264_slice_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi vsi_ctx;
+ struct mtk_h264_dec_slice_param h264_slice_param;
+
+ struct v4l2_h264_dpb_entry dpb[16];
+};
+
+static void *get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
+
+ return ctrl->p_cur.p;
+}
+
+static void get_h264_dpb_list(struct vdec_h264_slice_inst *inst,
+ struct mtk_h264_dec_slice_param *slice_param)
+{
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ u64 index;
+
+ vq = v4l2_m2m_get_vq(inst->ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ for (index = 0; index < ARRAY_SIZE(slice_param->decode_params.dpb); index++) {
+ const struct slice_h264_dpb_entry *dpb;
+ int vb2_index;
+
+ dpb = &slice_param->decode_params.dpb[index];
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) {
+ slice_param->h264_dpb_info[index].reference_flag = 0;
+ continue;
+ }
+
+ vb2_index = vb2_find_timestamp(vq, dpb->reference_ts, 0);
+ if (vb2_index < 0) {
+ mtk_vcodec_err(inst, "Reference invalid: dpb_index(%lld) reference_ts(%lld)",
+ index, dpb->reference_ts);
+ continue;
+ }
+ /* 1 for short term reference, 2 for long term reference */
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ slice_param->h264_dpb_info[index].reference_flag = 1;
+ else
+ slice_param->h264_dpb_info[index].reference_flag = 2;
+
+ vb = vq->bufs[vb2_index];
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ slice_param->h264_dpb_info[index].field = vb2_v4l2->field;
+
+ slice_param->h264_dpb_info[index].y_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ slice_param->h264_dpb_info[index].c_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ }
+ }
+}
+
+static void get_h264_sps_parameters(struct mtk_h264_sps_param *dst_param,
+ const struct v4l2_ctrl_h264_sps *src_param)
+{
+ GET_MTK_VDEC_PARAM(chroma_format_idc);
+ GET_MTK_VDEC_PARAM(bit_depth_luma_minus8);
+ GET_MTK_VDEC_PARAM(bit_depth_chroma_minus8);
+ GET_MTK_VDEC_PARAM(log2_max_frame_num_minus4);
+ GET_MTK_VDEC_PARAM(pic_order_cnt_type);
+ GET_MTK_VDEC_PARAM(log2_max_pic_order_cnt_lsb_minus4);
+ GET_MTK_VDEC_PARAM(max_num_ref_frames);
+ GET_MTK_VDEC_PARAM(pic_width_in_mbs_minus1);
+ GET_MTK_VDEC_PARAM(pic_height_in_map_units_minus1);
+
+ GET_MTK_VDEC_FLAG(separate_colour_plane_flag,
+ V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+ GET_MTK_VDEC_FLAG(qpprime_y_zero_transform_bypass_flag,
+ V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
+ GET_MTK_VDEC_FLAG(delta_pic_order_always_zero_flag,
+ V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
+ GET_MTK_VDEC_FLAG(frame_mbs_only_flag,
+ V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
+ GET_MTK_VDEC_FLAG(mb_adaptive_frame_field_flag,
+ V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ GET_MTK_VDEC_FLAG(direct_8x8_inference_flag,
+ V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
+}
+
+static void get_h264_pps_parameters(struct mtk_h264_pps_param *dst_param,
+ const struct v4l2_ctrl_h264_pps *src_param)
+{
+ GET_MTK_VDEC_PARAM(num_ref_idx_l0_default_active_minus1);
+ GET_MTK_VDEC_PARAM(num_ref_idx_l1_default_active_minus1);
+ GET_MTK_VDEC_PARAM(weighted_bipred_idc);
+ GET_MTK_VDEC_PARAM(pic_init_qp_minus26);
+ GET_MTK_VDEC_PARAM(chroma_qp_index_offset);
+ GET_MTK_VDEC_PARAM(second_chroma_qp_index_offset);
+
+ GET_MTK_VDEC_FLAG(entropy_coding_mode_flag,
+ V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
+ GET_MTK_VDEC_FLAG(pic_order_present_flag,
+ V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
+ GET_MTK_VDEC_FLAG(weighted_pred_flag,
+ V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
+ GET_MTK_VDEC_FLAG(deblocking_filter_control_present_flag,
+ V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+ GET_MTK_VDEC_FLAG(constrained_intra_pred_flag,
+ V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+ GET_MTK_VDEC_FLAG(redundant_pic_cnt_present_flag,
+ V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
+ GET_MTK_VDEC_FLAG(transform_8x8_mode_flag,
+ V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
+ GET_MTK_VDEC_FLAG(scaling_matrix_present_flag,
+ V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT);
+}
+
+static void
+get_h264_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
+ const struct v4l2_ctrl_h264_scaling_matrix *src_matrix)
+{
+ memcpy(dst_matrix->scaling_list_4x4, src_matrix->scaling_list_4x4,
+ sizeof(dst_matrix->scaling_list_4x4));
+
+ memcpy(dst_matrix->scaling_list_8x8, src_matrix->scaling_list_8x8,
+ sizeof(dst_matrix->scaling_list_8x8));
+}
+
+static void
+get_h264_decode_parameters(struct slice_api_h264_decode_param *dst_params,
+ const struct v4l2_ctrl_h264_decode_params *src_params,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dst_params->dpb); i++) {
+ struct slice_h264_dpb_entry *dst_entry = &dst_params->dpb[i];
+ const struct v4l2_h264_dpb_entry *src_entry = &dpb[i];
+
+ dst_entry->reference_ts = src_entry->reference_ts;
+ dst_entry->frame_num = src_entry->frame_num;
+ dst_entry->pic_num = src_entry->pic_num;
+ dst_entry->top_field_order_cnt = src_entry->top_field_order_cnt;
+ dst_entry->bottom_field_order_cnt =
+ src_entry->bottom_field_order_cnt;
+ dst_entry->flags = src_entry->flags;
+ }
+
+ /*
+ * num_slices is a leftover from the old H.264 support and is ignored
+ * by the firmware.
+ */
+ dst_params->num_slices = 0;
+ dst_params->nal_ref_idc = src_params->nal_ref_idc;
+ dst_params->top_field_order_cnt = src_params->top_field_order_cnt;
+ dst_params->bottom_field_order_cnt = src_params->bottom_field_order_cnt;
+ dst_params->flags = src_params->flags;
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->top_field_order_cnt == b->top_field_order_cnt &&
+ a->bottom_field_order_cnt == b->bottom_field_order_cnt;
+}
+
+/*
+ * Move DPB entries of dec_param that refer to a frame already existing in dpb
+ * into the already existing slot in dpb, and move other entries into new slots.
+ *
+ * This function is an adaptation of the similarly-named function in
+ * hantro_h264.c.
+ */
+static void update_dpb(const struct v4l2_ctrl_h264_decode_params *dec_param,
+ struct v4l2_h264_dpb_entry *dpb)
+{
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(in_use, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ /* Disable all entries by default, and mark the ones in use. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ set_bit(i, in_use);
+ dpb[i].flags &= ~V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
+ }
+
+ /* Try to match new DPB entries with existing ones by their POCs. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ /*
+ * To cut down on comparisons, iterate only over target DPB
+ * entries that are already in use.
+ */
+ for_each_set_bit(j, in_use, ARRAY_SIZE(dec_param->dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &dpb[j];
+ if (!dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ /* Don't reiterate on this one. */
+ clear_bit(j, in_use);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(dec_param->dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+ * Both arrays are the same size, so there is no way we can
+ * run out of space in the target array, unless
+ * something is buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(dec_param->dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(dec_param->dpb)))
+ return;
+
+ cdpb = &dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
+
+/*
+ * The firmware expects unused reflist entries to have the value 0x20.
+ */
+static void fixup_ref_list(u8 *ref_list, size_t num_valid)
+{
+ memset(&ref_list[num_valid], 0x20, 32 - num_valid);
+}
+
+static void get_vdec_decode_parameters(struct vdec_h264_slice_inst *inst)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ const struct v4l2_ctrl_h264_sps *sps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SPS);
+ const struct v4l2_ctrl_h264_pps *pps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_PPS);
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ struct mtk_h264_dec_slice_param *slice_param = &inst->h264_slice_param;
+ struct v4l2_h264_reflist_builder reflist_builder;
+ u8 *p0_reflist = slice_param->decode_params.ref_pic_list_p0;
+ u8 *b0_reflist = slice_param->decode_params.ref_pic_list_b0;
+ u8 *b1_reflist = slice_param->decode_params.ref_pic_list_b1;
+
+ update_dpb(dec_params, inst->dpb);
+
+ get_h264_sps_parameters(&slice_param->sps, sps);
+ get_h264_pps_parameters(&slice_param->pps, pps);
+ get_h264_scaling_matrix(&slice_param->scaling_matrix, scaling_matrix);
+ get_h264_decode_parameters(&slice_param->decode_params, dec_params,
+ inst->dpb);
+ get_h264_dpb_list(inst, slice_param);
+
+ /* Build the reference lists */
+ v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps,
+ inst->dpb);
+ v4l2_h264_build_p_ref_list(&reflist_builder, p0_reflist);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, b0_reflist, b1_reflist);
+ /* Adapt the built lists to the firmware's expectations */
+ fixup_ref_list(p0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b1_reflist, reflist_builder.num_valid);
+
+ memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
+ sizeof(inst->vsi_ctx.h264_slice_params));
+}
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ int unit_size = (width / MB_UNIT_LEN) * (height / MB_UNIT_LEN) + 8;
+
+ return HW_MB_STORE_SZ * unit_size;
+}
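
As a worked example of the sizing above (an editorial note, not part of the patch), a 16x32-aligned 1920x1088 frame needs roughly half a megabyte per motion vector buffer:

    /* unit_size = (1920 / 16) * (1088 / 16) + 8 = 120 * 68 + 8 = 8168
     * size      = HW_MB_STORE_SZ * unit_size = 64 * 8168 = 522752 bytes (~511 KiB)
     * and H264_MAX_MV_NUM (32) such buffers are allocated in alloc_mv_buf().
     */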
+
+static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ int err;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi_ctx.pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->pred_buf;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi_ctx.pred_buf_dma = 0;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ mtk_v4l2_debug(3, "size = 0x%x", buf_sz);
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi_ctx.mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_slice_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ inst->vsi_ctx.mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static void get_pic_info(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ ctx->picinfo.buf_w = (ctx->picinfo.pic_w + 15) & 0xFFFFFFF0;
+ ctx->picinfo.buf_h = (ctx->picinfo.pic_h + 31) & 0xFFFFFFE0;
+ ctx->picinfo.fb_sz[0] = ctx->picinfo.buf_w * ctx->picinfo.buf_h;
+ ctx->picinfo.fb_sz[1] = ctx->picinfo.fb_sz[0] >> 1;
+ inst->vsi_ctx.dec.cap_num_planes =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
+
+ *pic = ctx->picinfo;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
+
+ if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
+ inst->vsi_ctx.dec.resolution_changed = true;
+ if (ctx->last_decoded_picinfo.buf_w != ctx->picinfo.buf_w ||
+ ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ mtk_v4l2_debug(1, "ResChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->vsi_ctx.dec.resolution_changed,
+ inst->vsi_ctx.dec.realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ }
+}
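
To illustrate the alignment math in get_pic_info() (a worked example, not part of the patch):

    /* A 1920x1080 stream:
     *   buf_w    = (1920 + 15) & ~15 = 1920
     *   buf_h    = (1080 + 31) & ~31 = 1088
     *   fb_sz[0] = 1920 * 1088 = 2088960 bytes (luma plane)
     *   fb_sz[1] = fb_sz[0] / 2 = 1044480 bytes (chroma plane, 4:2:0 layout)
     */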
+
+static void get_crop_info(struct vdec_h264_slice_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi_ctx.crop.left;
+ cr->top = inst->vsi_ctx.crop.top;
+ cr->width = inst->vsi_ctx.crop.width;
+ cr->height = inst->vsi_ctx.crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_slice_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi_ctx.dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_h264_slice_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_H264;
+ inst->vpu.ctx = ctx;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ inst->vsi_ctx.dec.resolution_changed = true;
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "struct size = %zu,%zu,%zu,%zu\n",
+ sizeof(struct mtk_h264_sps_param),
+ sizeof(struct mtk_h264_pps_param),
+ sizeof(struct mtk_h264_dec_slice_param),
+ sizeof(struct mtk_h264_dpb_info));
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ ctx->drv_handle = inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_slice_deinit(void *h_vdec)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ u32 data[2];
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ int err;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vpu_dec_reset(vpu);
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ inst->vsi_ctx.dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi_ctx.dec.y_fb_dma = y_fb_dma;
+ inst->vsi_ctx.dec.c_fb_dma = c_fb_dma;
+ inst->vsi_ctx.dec.vdec_fb_va = (u64)(uintptr_t)fb;
+
+ get_vdec_decode_parameters(inst);
+ data[0] = bs->size;
+ /*
+ * Reconstruct the first byte of the NAL unit, as the firmware expects
+ * it to be passed explicitly even though it is present in the
+ * bitstream itself...
+ */
+ data[1] = (dec_params->nal_ref_idc << 5) |
+ ((dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ ? 0x5 : 0x1);
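
A quick sanity check of the reconstructed byte (editorial note, not part of the patch):

    /* An IDR slice with nal_ref_idc = 3:      (3 << 5) | 0x5 = 0x65
     * A non-IDR reference slice, idc = 2:     (2 << 5) | 0x1 = 0x41
     * which match the nal_unit_type 5/1 header bytes seen in the bitstream.
     */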
+
+ *res_chg = inst->vsi_ctx.dec.resolution_changed;
+ if (*res_chg) {
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ if (inst->vsi_ctx.dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->vsi_ctx.dec.realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ *res_chg = false;
+ }
+
+ memcpy(inst->vpu.vsi, &inst->vsi_ctx, sizeof(inst->vsi_ctx));
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ /* wait for the decoder-done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+ vpu_dec_end(vpu);
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ mtk_vcodec_debug(inst, "\n - NALU[%d]", inst->num_nalu);
+ return 0;
+
+err_free_fb_out:
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
+
+static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ switch (type) {
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct vdec_common_if vdec_h264_slice_if = {
+ .init = vdec_h264_slice_init,
+ .decode = vdec_h264_slice_decode,
+ .get_param = vdec_h264_slice_get_param,
+ .deinit = vdec_h264_slice_deinit,
+};
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
index b18743b906ea..42008243ceac 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -19,6 +19,9 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
int ret = 0;
switch (fourcc) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ ctx->dec_if = &vdec_h264_slice_if;
+ break;
case V4L2_PIX_FMT_H264:
ctx->dec_if = &vdec_h264_if;
break;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index ec8f4e8d3d23..d467e8af4a84 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -55,6 +55,7 @@ struct vdec_fb_node {
};
extern const struct vdec_common_if vdec_h264_if;
+extern const struct vdec_common_if vdec_h264_slice_if;
extern const struct vdec_common_if vdec_vp8_if;
extern const struct vdec_common_if vdec_vp9_if;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
index 68e8d5cb16d7..5f45a537beb4 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -29,11 +29,15 @@ enum vdec_ipi_msgid {
/**
* struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
* @msg_id : vdec_ipi_msgid
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
*/
struct vdec_ap_ipi_cmd {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
};
/**
@@ -63,7 +67,8 @@ struct vdec_ap_ipi_init {
/**
* struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
* @msg_id : AP_IPIMSG_DEC_START
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
* @data : Header info
* H264 decoder [0]:buf_sz [1]:nal_start
* VP8 decoder [0]:width/height
@@ -72,7 +77,10 @@ struct vdec_ap_ipi_init {
*/
struct vdec_ap_ipi_dec_start {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
uint32_t data[3];
uint32_t reserved;
};
@@ -83,12 +91,19 @@ struct vdec_ap_ipi_dec_start {
* @status : VPU execution result
* @ap_inst_addr : AP vcodec_vpu_inst instance address
* @vpu_inst_addr : VPU decoder instance address
+ * @vdec_abi_version: ABI version of the firmware. Kernel can use it to
+ * ensure that it is compatible with the firmware.
+ * This field is not valid for MT8173 and must not be
+ * accessed for this chip.
+ * @inst_id : instance ID. Valid only if the ABI version >= 2.
*/
struct vdec_vpu_ipi_init_ack {
uint32_t msg_id;
int32_t status;
uint64_t ap_inst_addr;
uint32_t vpu_inst_addr;
+ uint32_t vdec_abi_version;
+ uint32_t inst_id;
};
#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 58b0e6fa8fd2..5dffc459a33d 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -24,6 +24,34 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+
+ /* Set default ABI version if dealing with unversioned firmware. */
+ vpu->fw_abi_version = 0;
+ /*
+ * Instance ID is only used if ABI version >= 2. Initialize it with
+ * garbage by default.
+ */
+ vpu->inst_id = 0xdeadbeef;
+
+ /* Firmware version field does not exist on MT8173. */
+ if (vpu->ctx->dev->vdec_pdata->chip == MTK_MT8173)
+ return;
+
+ /* Check firmware version. */
+ vpu->fw_abi_version = msg->vdec_abi_version;
+ mtk_vcodec_debug(vpu, "firmware version 0x%x\n", vpu->fw_abi_version);
+ switch (vpu->fw_abi_version) {
+ case 1:
+ break;
+ case 2:
+ vpu->inst_id = msg->inst_id;
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
+ vpu->fw_abi_version);
+ vpu->failure = 1;
+ break;
+ }
}
/*
@@ -44,6 +72,9 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+
if (msg->status == 0) {
switch (msg->msg_id) {
case VPU_IPIMSG_DEC_INIT_ACK:
@@ -63,8 +94,6 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
}
mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
- vpu->failure = msg->status;
- vpu->signaled = 1;
}
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
@@ -96,7 +125,10 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
memset(&msg, 0, sizeof(msg));
msg.msg_id = msg_id;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
@@ -146,7 +178,10 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_DEC_START;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
for (i = 0; i < len; i++)
msg.data[i] = data[i];
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index 85224eb7e34b..c2ed5b6cab8b 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -18,6 +18,9 @@ struct mtk_vcodec_ctx;
* for control and info share
* @failure : VPU execution result status, 0: success, others: fail
* @inst_addr : VPU decoder instance address
+ * @fw_abi_version : ABI version of the firmware.
+ * @inst_id : if fw_abi_version >= 2, contains the instance ID to be given
+ * in place of inst_addr in messages.
* @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @ctx : context for v4l2 layer integration
* @dev : platform device of VPU
@@ -29,6 +32,8 @@ struct vdec_vpu_inst {
void *vsi;
int32_t failure;
uint32_t inst_addr;
+ uint32_t fw_abi_version;
+ uint32_t inst_id;
unsigned int signaled;
struct mtk_vcodec_ctx *ctx;
wait_queue_head_t wq;
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index ec290dde59cf..7f1647da0ade 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -848,7 +848,8 @@ static int mtk_vpu_probe(struct platform_device *pdev)
vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
if (!vpu->wdt.wq) {
dev_err(dev, "initialize wdt workqueue failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_unprepare;
}
INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
mutex_init(&vpu->vpu_mutex);
@@ -942,6 +943,8 @@ disable_vpu_clk:
vpu_clock_disable(vpu);
workqueue_destroy:
destroy_workqueue(vpu->wdt.wq);
+clk_unprepare:
+ clk_unprepare(vpu->clk);
return ret;
}
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 08a5473b5610..3ce84d0f078c 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -804,7 +804,6 @@ static int emmaprp_probe(struct platform_device *pdev)
{
struct emmaprp_dev *pcdev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
@@ -822,8 +821,7 @@ static int emmaprp_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk_emma_ahb))
return PTR_ERR(pcdev->clk_emma_ahb);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res);
+ pcdev->base_emma = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcdev->base_emma))
return PTR_ERR(pcdev->base_emma);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 21193f0b7f61..3e0d9af7ffec 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -277,7 +277,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
*/
static int omapvid_setup_overlay(struct omap_vout_device *vout,
struct omap_overlay *ovl, int posx, int posy, int outw,
- int outh, u32 addr)
+ int outh, dma_addr_t addr)
{
int ret = 0;
struct omap_overlay_info info;
@@ -352,7 +352,7 @@ setup_ovl_err:
/*
* Initialize the overlay structure
*/
-static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+static int omapvid_init(struct omap_vout_device *vout, dma_addr_t addr)
{
int ret = 0, i;
struct v4l2_window *win;
@@ -479,7 +479,8 @@ err:
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
int ret, fid, mgr_id;
- u32 addr, irq;
+ dma_addr_t addr;
+ u32 irq;
struct omap_overlay *ovl;
u64 ts;
struct omapvideo_info *ovid;
@@ -543,7 +544,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
struct omap_vout_buffer, queue);
list_del(&vout->next_frm->queue);
- addr = (unsigned long)vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
/* First save the configuration in ovelray structure */
@@ -976,7 +977,7 @@ static int omap_vout_vb2_prepare(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, vout->pix.sizeimage);
voutbuf->vbuf.field = V4L2_FIELD_NONE;
- vout->queued_buf_addr[vb->index] = (u8 *)buf_phy_addr;
+ vout->queued_buf_addr[vb->index] = buf_phy_addr;
if (ovid->rotation_type == VOUT_ROT_VRFB)
return omap_vout_prepare_vrfb(vout, vb);
return 0;
@@ -995,7 +996,8 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
struct omap_vout_device *vout = vb2_get_drv_priv(vq);
struct omapvideo_info *ovid = &vout->vid_info;
struct omap_vout_buffer *buf, *tmp;
- u32 addr = 0, mask = 0;
+ dma_addr_t addr = 0;
+ u32 mask = 0;
int ret, j;
/* Get the next frame from the buffer queue */
@@ -1018,7 +1020,7 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
goto out;
}
- addr = (unsigned long)vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
@@ -1476,7 +1478,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
* To be precise: fbuf.base should match smem_start of
* struct fb_fix_screeninfo.
*/
- vout->fbuf.base = (void *)info.paddr;
+ vout->fbuf.base = (void *)(uintptr_t)info.paddr;
/* Set VRFB as rotation_type for omap2 and omap3 */
if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 6bd672cbdb62..0cfa0169875f 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -305,7 +305,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
rotation = calc_rotation(vout);
- vout->queued_buf_addr[vb->index] = (u8 *)
+ vout->queued_buf_addr[vb->index] =
vout->vrfb_context[vb->index].paddr[rotation];
return 0;
}
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 1cff6dea1879..b586193341d2 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -170,7 +170,7 @@ struct omap_vout_device {
struct omap_vout_buffer *cur_frm, *next_frm;
spinlock_t vbq_lock; /* spinlock for dma_queue */
struct list_head dma_queue;
- u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ dma_addr_t queued_buf_addr[VIDEO_MAX_FRAME];
u32 cropped_offset;
s32 tv_field1_offset;
void *isr_handle;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 20f59c59ff8a..6de377ce281d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2003,7 +2003,7 @@ static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&isp->notifier);
+ v4l2_async_nf_unregister(&isp->notifier);
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
isp_xclk_cleanup(isp);
@@ -2013,7 +2013,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
kfree(isp);
@@ -2172,8 +2172,9 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+ isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
+ ep, struct
+ isp_async_subdev);
if (!IS_ERR(isd))
isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
}
@@ -2211,8 +2212,10 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
}
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+ isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
+ ep,
+ struct
+ isp_async_subdev);
if (!IS_ERR(isd)) {
switch (vep.bus_type) {
@@ -2289,7 +2292,7 @@ static int isp_probe(struct platform_device *pdev)
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
- v4l2_async_notifier_init(&isp->notifier);
+ v4l2_async_nf_init(&isp->notifier);
isp->dev = &pdev->dev;
ret = isp_parse_of_endpoints(isp);
@@ -2418,7 +2421,7 @@ static int isp_probe(struct platform_device *pdev)
isp->notifier.ops = &isp_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+ ret = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
goto error_register_entities;
@@ -2437,7 +2440,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
error_release_isp:
kfree(isp);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index ec4c010644ca..3ba00b0f9320 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2249,10 +2249,9 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &pcdev->notifier,
- of_fwnode_handle(np),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&pcdev->notifier,
+ of_fwnode_handle(np),
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
out:
@@ -2289,7 +2288,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk))
return PTR_ERR(pcdev->clk);
- v4l2_async_notifier_init(&pcdev->notifier);
+ v4l2_async_nf_init(&pcdev->notifier);
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
if (pcdev->pdata) {
@@ -2297,11 +2296,10 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
- asd = v4l2_async_notifier_add_i2c_subdev(
- &pcdev->notifier,
- pcdev->pdata->sensor_i2c_adapter_id,
- pcdev->pdata->sensor_i2c_address,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&pcdev->notifier,
+ pcdev->pdata->sensor_i2c_adapter_id,
+ pcdev->pdata->sensor_i2c_address,
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
} else if (pdev->dev.of_node) {
@@ -2402,13 +2400,13 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_notifier_cleanup;
pcdev->notifier.ops = &pxa_camera_sensor_ops;
- err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ err = v4l2_async_nf_register(&pcdev->v4l2_dev, &pcdev->notifier);
if (err)
goto exit_notifier_cleanup;
return 0;
exit_notifier_cleanup:
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
@@ -2432,8 +2430,8 @@ static int pxa_camera_remove(struct platform_device *pdev)
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- v4l2_async_notifier_unregister(&pcdev->notifier);
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_unregister(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 8594d275b41d..5c083d70d495 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -177,7 +177,7 @@
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
@@ -185,7 +185,10 @@ static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
- dev_err(dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
+ gen, rev, step);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -771,7 +774,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_170 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 53c56a8d4545..42047b11ba52 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -210,11 +210,13 @@
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -288,22 +290,14 @@ static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ pix->pixelformat == V4L2_PIX_FMT_NV21)
if (plane == 1)
*height /= 2;
- break;
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- break;
- }
}
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
@@ -1004,7 +998,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_1 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index a59635217758..ab2d57bdf5e7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -254,11 +254,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -368,30 +370,26 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
-
}
}
@@ -1196,7 +1194,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_7 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 998429dbb65c..7e6b62c930ac 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -247,11 +247,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -341,27 +343,24 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
}
@@ -1180,7 +1179,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_8 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index e0f3a36f3f3f..71f78b40e7f5 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -604,6 +604,8 @@ static int vfe_get(struct vfe_device *vfe)
vfe_reset_output_maps(vfe);
vfe_init_outputs(vfe);
+
+ vfe->ops->hw_version(vfe);
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
@@ -713,8 +715,6 @@ static int vfe_set_power(struct v4l2_subdev *sd, int on)
ret = vfe_get(vfe);
if (ret < 0)
return ret;
-
- vfe->ops->hw_version_read(vfe, vfe->camss->dev);
} else {
vfe_put(vfe);
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 844b9275031d..f166d176cb77 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -103,7 +103,7 @@ struct vfe_device;
struct vfe_hw_ops {
void (*enable_irq_common)(struct vfe_device *vfe);
void (*global_reset)(struct vfe_device *vfe);
- void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u32 (*hw_version)(struct vfe_device *vfe);
irqreturn_t (*isr)(int irq, void *dev);
void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
void (*pm_domain_off)(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index ef100d5f7763..be091c50a3c0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -886,9 +886,9 @@ static int camss_of_parse_ports(struct camss *camss)
goto err_cleanup;
}
- csd = v4l2_async_notifier_add_fwnode_subdev(
- &camss->notifier, of_fwnode_handle(remote),
- struct camss_async_subdev);
+ csd = v4l2_async_nf_add_fwnode(&camss->notifier,
+ of_fwnode_handle(remote),
+ struct camss_async_subdev);
of_node_put(remote);
if (IS_ERR(csd)) {
ret = PTR_ERR(csd);
@@ -1361,7 +1361,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_free;
}
- v4l2_async_notifier_init(&camss->notifier);
+ v4l2_async_nf_init(&camss->notifier);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
@@ -1397,8 +1397,8 @@ static int camss_probe(struct platform_device *pdev)
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&camss->v4l2_dev,
- &camss->notifier);
+ ret = v4l2_async_nf_register(&camss->v4l2_dev,
+ &camss->notifier);
if (ret) {
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
@@ -1436,7 +1436,7 @@ err_register_subdevs:
err_register_entities:
v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
err_free:
kfree(camss);
@@ -1478,8 +1478,8 @@ static int camss_remove(struct platform_device *pdev)
{
struct camss *camss = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&camss->notifier);
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_unregister(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
camss_unregister_entities(camss);
if (atomic_read(&camss->ref_count) == 0)
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 91b15842c555..f5fa81896012 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -65,7 +65,7 @@ static void venus_event_notify(struct venus_core *core, u32 event)
}
mutex_lock(&core->lock);
- core->sys_error = true;
+ set_bit(0, &core->sys_error);
list_for_each_entry(inst, &core->instances, list)
inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
mutex_unlock(&core->lock);
@@ -95,9 +95,8 @@ static void venus_sys_error_handler(struct work_struct *work)
failed = true;
}
- hfi_core_deinit(core, true);
-
- mutex_lock(&core->lock);
+ core->ops->core_deinit(core);
+ core->state = CORE_UNINIT;
for (i = 0; i < max_attempts; i++) {
if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
@@ -105,6 +104,8 @@ static void venus_sys_error_handler(struct work_struct *work)
msleep(10);
}
+ mutex_lock(&core->lock);
+
venus_shutdown(core);
venus_coredump(core);
@@ -161,7 +162,8 @@ static void venus_sys_error_handler(struct work_struct *work)
dev_warn(core->dev, "system error has occurred (recovered)\n");
mutex_lock(&core->lock);
- core->sys_error = false;
+ clear_bit(0, &core->sys_error);
+ wake_up_all(&core->sys_err_done);
mutex_unlock(&core->lock);
}
@@ -267,7 +269,6 @@ static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct venus_core *core;
- struct resource *r;
int ret;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
@@ -276,8 +277,7 @@ static int venus_probe(struct platform_device *pdev)
core->dev = dev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- core->base = devm_ioremap_resource(dev, r);
+ core->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(core->base))
return PTR_ERR(core->base);
@@ -318,6 +318,7 @@ static int venus_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+ init_waitqueue_head(&core->sys_err_done);
ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -567,6 +568,69 @@ static const struct venus_resources msm8996_res = {
.fwname = "qcom/venus-4.2/venus.mdt",
};
+static const struct freq_tbl sdm660_freq_table[] = {
+ { 979200, 518400000 },
+ { 489600, 441600000 },
+ { 432000, 404000000 },
+ { 244800, 320000000 },
+ { 216000, 269330000 },
+ { 108000, 133330000 },
+};
+
+static const struct reg_val sdm660_reg_preset[] = {
+ { 0x80010, 0x001f001f },
+ { 0x80018, 0x00000156 },
+ { 0x8001c, 0x00000156 },
+};
+
+static const struct bw_tbl sdm660_bw_table_enc[] = {
+ { 979200, 1044000, 0, 2446336, 0 }, /* 4k UHD @ 30 */
+ { 864000, 887000, 0, 2108416, 0 }, /* 720p @ 240 */
+ { 489600, 666000, 0, 1207296, 0 }, /* 1080p @ 60 */
+ { 432000, 578000, 0, 1058816, 0 }, /* 720p @ 120 */
+ { 244800, 346000, 0, 616448, 0 }, /* 1080p @ 30 */
+ { 216000, 293000, 0, 534528, 0 }, /* 720p @ 60 */
+ { 108000, 151000, 0, 271360, 0 }, /* 720p @ 30 */
+};
+
+static const struct bw_tbl sdm660_bw_table_dec[] = {
+ { 979200, 2365000, 0, 1892000, 0 }, /* 4k UHD @ 30 */
+ { 864000, 1978000, 0, 1554000, 0 }, /* 720p @ 240 */
+ { 489600, 1133000, 0, 895000, 0 }, /* 1080p @ 60 */
+ { 432000, 994000, 0, 781000, 0 }, /* 720p @ 120 */
+ { 244800, 580000, 0, 460000, 0 }, /* 1080p @ 30 */
+ { 216000, 501000, 0, 301000, 0 }, /* 720p @ 60 */
+ { 108000, 255000, 0, 202000, 0 }, /* 720p @ 30 */
+};
+
+static const struct venus_resources sdm660_res = {
+ .freq_tbl = sdm660_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm660_freq_table),
+ .reg_tbl = sdm660_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sdm660_reg_preset),
+ .bw_tbl_enc = sdm660_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm660_bw_table_enc),
+ .bw_tbl_dec = sdm660_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm660_bw_table_dec),
+ .clks = {"core", "iface", "bus", "bus_throttle" },
+ .clks_num = 4,
+ .vcodec0_clks = { "vcodec0_core" },
+ .vcodec1_clks = { "vcodec0_core" },
+ .vcodec_clks_num = 1,
+ .vcodec_num = 1,
+ .max_load = 1036800,
+ .hfi_version = HFI_VERSION_3XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .cp_start = 0,
+ .cp_size = 0x79000000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x28000000,
+ .dma_mask = 0xd9000000 - 1,
+ .fwname = "qcom/venus-4.4/venus.mdt",
+};
+
static const struct freq_tbl sdm845_freq_table[] = {
{ 3110400, 533000000 }, /* 4096x2160@90 */
{ 2073600, 444000000 }, /* 4096x2160@60 */
@@ -729,6 +793,7 @@ static const struct venus_resources sm8250_res = {
.vcodec_num = 1,
.max_load = 7833600,
.hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 4,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
@@ -736,12 +801,66 @@ static const struct venus_resources sm8250_res = {
.fwname = "qcom/vpu-1.0/venus.mdt",
};
+static const struct freq_tbl sc7280_freq_table[] = {
+ { 0, 460000000 },
+ { 0, 424000000 },
+ { 0, 335000000 },
+ { 0, 240000000 },
+ { 0, 133333333 },
+};
+
+static const struct bw_tbl sc7280_bw_table_enc[] = {
+ { 1944000, 1896000, 0, 3657000, 0 }, /* 3840x2160@60 */
+ { 972000, 968000, 0, 1848000, 0 }, /* 3840x2160@30 */
+ { 489600, 618000, 0, 941000, 0 }, /* 1920x1080@60 */
+ { 244800, 318000, 0, 480000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sc7280_bw_table_dec[] = {
+ { 2073600, 2128000, 0, 3831000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1085000, 0, 1937000, 0 }, /* 4096x2160@30 */
+ { 489600, 779000, 0, 998000, 0 }, /* 1920x1080@60 */
+ { 244800, 400000, 0, 509000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct reg_val sm7280_reg_preset[] = {
+ { 0xb0088, 0 },
+};
+
+static const struct venus_resources sc7280_res = {
+ .freq_tbl = sc7280_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sc7280_freq_table),
+ .reg_tbl = sm7280_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sm7280_reg_preset),
+ .bw_tbl_enc = sc7280_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc),
+ .bw_tbl_dec = sc7280_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec),
+ .clks = {"core", "bus", "iface"},
+ .clks_num = 3,
+ .vcodec0_clks = {"vcodec_core", "vcodec_bus"},
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx", NULL },
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 1,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu-2.0/venus.mbn",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
{ .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
+ { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
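The new sdm660_res and sc7280_res tables are consumed through the of_device_id .data pointer; the sketch below shows how a probe path typically recovers the matched table. of_device_get_match_data() is standard kernel API, but the snippet is illustrative and not a quote of venus_probe().

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	/* Illustrative only: recover the venus_resources selected by venus_dt_match. */
	static int example_probe(struct platform_device *pdev)
	{
		const struct venus_resources *res;

		res = of_device_get_match_data(&pdev->dev);	/* e.g. &sc7280_res */
		if (!res)
			return -ENODEV;

		/* res->freq_tbl, res->clks, res->fwname etc. then drive initialization. */
		return 0;
	}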
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 5ec851115eca..7c3bac01cd49 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -7,6 +7,7 @@
#ifndef __VENUS_CORE_H_
#define __VENUS_CORE_H_
+#include <linux/bitops.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
@@ -68,6 +69,7 @@ struct venus_resources {
const char * const resets[VIDC_RESETS_NUM_MAX];
unsigned int resets_num;
enum hfi_version hfi_version;
+ u8 num_vpp_pipes;
u32 max_load;
unsigned int vmem_id;
u32 vmem_size;
@@ -181,7 +183,8 @@ struct venus_core {
unsigned int state;
struct completion done;
unsigned int error;
- bool sys_error;
+ unsigned long sys_error;
+ wait_queue_head_t sys_err_done;
const struct hfi_core_ops *core_ops;
const struct venus_pm_ops *pm_ops;
struct mutex pm_lock;
@@ -334,6 +337,7 @@ enum venus_inst_modes {
* @registeredbufs: a list of registered capture buffers
* @delayed_process: a list of delayed buffers
* @delayed_process_work: a work_struct for processing delayed buffers
+ * @nonblock: nonblocking flag
* @ctrl_handler: v4l control handler
* @controls: a union of decoder and encoder control parameters
* @fh: a holder of v4l file handle structure
@@ -397,6 +401,7 @@ struct venus_inst {
struct list_head registeredbufs;
struct list_head delayed_process;
struct work_struct delayed_process_work;
+ bool nonblock;
struct v4l2_ctrl_handler ctrl_handler;
union {
@@ -408,6 +413,7 @@ struct venus_inst {
u32 width;
u32 height;
struct v4l2_rect crop;
+ u32 fw_min_cnt;
u32 out_width;
u32 out_height;
u32 colorspace;
@@ -452,6 +458,7 @@ struct venus_inst {
bool next_buf_last;
bool drain_active;
enum venus_inst_modes flags;
+ struct ida dpb_ids;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 227bd3b3f84c..14b6f1d05991 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -27,7 +27,12 @@
static void venus_reset_cpu(struct venus_core *core)
{
u32 fw_size = core->fw.mapped_mem_size;
- void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_base;
+
+ if (IS_V6(core))
+ wrapper_base = core->wrapper_tz_base;
+ else
+ wrapper_base = core->wrapper_base;
writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
@@ -35,11 +40,17 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
- writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
- /* Bring ARM9 out of reset */
- writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ if (IS_V6(core)) {
+ /* Bring XTSS out of reset */
+ writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
+ writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
+
+ /* Bring ARM9 out of reset */
+ writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
}
int venus_set_hw_state(struct venus_core *core, bool resume)
@@ -56,7 +67,9 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (!IS_V6(core))
+ if (IS_V6(core))
+ writel(1, core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ else
writel(1, core->wrapper_base + WRAPPER_A9SS_SW_RESET);
}
@@ -162,12 +175,19 @@ static int venus_shutdown_no_tz(struct venus_core *core)
u32 reg;
struct device *dev = core->fw.dev;
void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- /* Assert the reset to ARM9 */
- reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
- reg |= WRAPPER_A9SS_SW_RESET_BIT;
- writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
-
+ if (IS_V6(core)) {
+ /* Assert the reset to XTSS */
+ reg = readl_relaxed(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ reg |= WRAPPER_XTSS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ /* Assert the reset to ARM9 */
+ reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
+ reg |= WRAPPER_A9SS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
/* Make sure reset is asserted before the mapping is removed */
mb();
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 8012f5c7bf34..84c3a511ec31 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
+#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -18,8 +19,13 @@
#include "hfi_platform.h"
#include "hfi_parser.h"
-#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
-#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
+#define NUM_MBS_4K (((ALIGN(4096, 16)) >> 4) * ((ALIGN(2304, 16)) >> 4))
+
+enum dpb_buf_owner {
+ DRIVER,
+ FIRMWARE,
+};
struct intbuf {
struct list_head list;
@@ -28,6 +34,8 @@ struct intbuf {
void *va;
dma_addr_t da;
unsigned long attrs;
+ enum dpb_buf_owner owned_by;
+ u32 dpb_out_tag;
};
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
@@ -95,9 +103,16 @@ int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
fdata.device_addr = buf->da;
fdata.buffer_type = buf->type;
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ fdata.clnt_data = buf->dpb_out_tag;
+
ret = hfi_session_process_buf(inst, &fdata);
if (ret)
goto fail;
+
+ buf->owned_by = FIRMWARE;
}
fail:
@@ -110,13 +125,19 @@ int venus_helper_free_dpb_bufs(struct venus_inst *inst)
struct intbuf *buf, *n;
list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ ida_free(&inst->dpb_ids, buf->dpb_out_tag);
+
list_del_init(&buf->list);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
kfree(buf);
}
- INIT_LIST_HEAD(&inst->dpbbufs);
+ if (list_empty(&inst->dpbbufs))
+ INIT_LIST_HEAD(&inst->dpbbufs);
return 0;
}
@@ -134,6 +155,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
unsigned int i;
u32 count;
int ret;
+ int id;
/* no need to allocate dpb buffers */
if (!inst->dpb_fmt)
@@ -171,6 +193,15 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
ret = -ENOMEM;
goto fail;
}
+ buf->owned_by = DRIVER;
+
+ id = ida_alloc_min(&inst->dpb_ids, VB2_MAX_FRAME, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto fail;
+ }
+
+ buf->dpb_out_tag = id;
list_add_tail(&buf->list, &inst->dpbbufs);
}
@@ -583,7 +614,7 @@ static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
return -EINVAL;
params.version = version;
- params.num_vpp_pipes = hfi_platform_num_vpp_pipes(version);
+ params.num_vpp_pipes = inst->core->res->num_vpp_pipes;
if (is_dec) {
params.width = inst->width;
@@ -623,9 +654,15 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
if (req)
memset(req, 0, sizeof(*req));
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ req->count_min = inst->fw_min_cnt;
+
ret = platform_get_bufreq(inst, type, req);
- if (!ret)
+ if (!ret) {
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ inst->fw_min_cnt = req->count_min;
return 0;
+ }
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
@@ -1365,6 +1402,24 @@ venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 tag)
+{
+ struct intbuf *dpb_buf;
+
+ if (!V4L2_TYPE_IS_CAPTURE(type) ||
+ buf_type != inst->dpb_buftype)
+ return;
+
+ list_for_each_entry(dpb_buf, &inst->dpbbufs, list)
+ if (dpb_buf->dpb_out_tag == tag) {
+ dpb_buf->owned_by = DRIVER;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_change_dpb_owner);
+
int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
@@ -1480,7 +1535,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
ret |= venus_helper_intbufs_free(inst);
ret |= hfi_session_deinit(inst);
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
ret = -EIO;
if (ret)
@@ -1504,10 +1559,24 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_pm_release_core(inst);
+ inst->session_error = 0;
+
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+void venus_helper_vb2_queue_error(struct venus_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_src_vq(m2m_ctx);
+ vb2_queue_error(q);
+ q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vb2_queue_error(q);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_queue_error);
+
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index e6269b4be3af..32619c3e8c97 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -14,6 +14,9 @@ struct venus_core;
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
unsigned int type, u32 idx);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 idx);
void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
@@ -21,6 +24,7 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb);
void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
int venus_helper_vb2_start_streaming(struct venus_inst *inst);
+void venus_helper_vb2_queue_error(struct venus_inst *inst);
void venus_helper_m2m_device_run(void *priv);
void venus_helper_m2m_job_abort(void *priv);
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 0f2482367e06..4e2151fb47f0 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -187,6 +187,11 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
mutex_lock(&core->lock);
+ if (test_bit(0, &inst->core->sys_error)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
max = atomic_add_unless(&core->insts_count, 1,
core->max_sessions_supported);
if (!max) {
@@ -196,6 +201,7 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
ret = 0;
}
+unlock:
mutex_unlock(&core->lock);
return ret;
@@ -214,7 +220,7 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
* session_init() can't pass successfully
*/
mutex_lock(&core->lock);
- if (!core->ops || core->sys_error) {
+ if (!core->ops || test_bit(0, &inst->core->sys_error)) {
mutex_unlock(&core->lock);
return -EIO;
}
@@ -263,6 +269,9 @@ int hfi_session_deinit(struct venus_inst *inst)
if (inst->state < INST_INIT)
return -EINVAL;
+ if (test_bit(0, &inst->core->sys_error))
+ goto done;
+
reinit_completion(&inst->done);
ret = ops->session_end(inst);
@@ -273,6 +282,7 @@ int hfi_session_deinit(struct venus_inst *inst)
if (ret)
return ret;
+done:
inst->state = INST_UNINIT;
return 0;
@@ -284,6 +294,9 @@ int hfi_session_start(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_LOAD_RESOURCES)
return -EINVAL;
@@ -308,6 +321,9 @@ int hfi_session_stop(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_START)
return -EINVAL;
@@ -331,6 +347,9 @@ int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
@@ -343,6 +362,9 @@ int hfi_session_abort(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_abort(inst);
@@ -362,6 +384,9 @@ int hfi_session_load_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_INIT)
return -EINVAL;
@@ -385,6 +410,9 @@ int hfi_session_unload_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_STOP)
return -EINVAL;
@@ -409,6 +437,9 @@ int hfi_session_flush(struct venus_inst *inst, u32 type, bool block)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_flush(inst, type);
@@ -429,6 +460,9 @@ int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
return ops->session_set_buffers(inst, bd);
}
@@ -438,6 +472,9 @@ int hfi_session_unset_buffers(struct venus_inst *inst,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_unset_buffers(inst, bd);
@@ -460,6 +497,9 @@ int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -483,6 +523,9 @@ int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -494,6 +537,9 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 60f4b8e4b8d0..5aea07307e02 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1299,6 +1299,13 @@ pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
break;
}
+ case HFI_PROPERTY_PARAM_WORK_ROUTE: {
+ struct hfi_video_work_route *in = pdata, *wr = prop_data;
+
+ wr->video_work_route = in->video_work_route;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
+ break;
+ }
default:
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index bec4feb63ceb..2daa88e3df9f 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -167,6 +167,7 @@
#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c
#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d
#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e
+#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011
#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012
#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013
@@ -448,6 +449,7 @@
#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
/*
* HFI_PROPERTY_CONFIG_COMMON_START
@@ -873,6 +875,10 @@ struct hfi_video_work_mode {
u32 video_work_mode;
};
+struct hfi_video_work_route {
+ u32 video_work_route;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_framerate;
@@ -910,6 +916,14 @@ struct hfi_extradata_input_crop {
u32 height;
};
+struct hfi_dpb_counts {
+ u32 max_dpb_count;
+ u32 max_ref_frames;
+ u32 max_dec_buffering;
+ u32 max_reorder_frames;
+ u32 fw_min_cnt;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME 0x01
#define HFI_COLOR_FORMAT_NV12 0x02
#define HFI_COLOR_FORMAT_NV21 0x03
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 9a2bdb002edc..df96db3761a7 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -32,6 +32,7 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_colour_space *colour_info;
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
+ struct hfi_dpb_counts *dpb_count;
u8 *data_ptr;
u32 ptype;
@@ -110,6 +111,12 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
event.input_crop.height = crop->height;
data_ptr += sizeof(*crop);
break;
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+ data_ptr += sizeof(u32);
+ dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ event.buf_count = dpb_count->fw_min_cnt;
+ data_ptr += sizeof(*dpb_count);
+ break;
default:
break;
}
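The pointer arithmetic in the new HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS case follows the same packed layout as the neighbouring property cases; a hedged sketch of that assumption (derived from this hunk, not from HFI documentation):

	/*
	 * Assumed record layout inside the sequence-change event payload:
	 *
	 *   data_ptr -> [ u32 property id ][ property struct ][ u32 property id ][ ... ]
	 *
	 * so reading the firmware's minimum DPB count looks like:
	 */
	static u32 example_read_fw_min_cnt(u8 *data_ptr)
	{
		struct hfi_dpb_counts *dpb;

		data_ptr += sizeof(u32);			/* skip the property id */
		dpb = (struct hfi_dpb_counts *)data_ptr;
		return dpb->fw_min_cnt;				/* feeds event.buf_count above */
	}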
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index 479178b0600d..ea25c451222b 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1164,7 +1164,7 @@ static int output_buffer_count(u32 session_type, u32 codec)
output_min_count = 6;
break;
case V4L2_PIX_FMT_VP9:
- output_min_count = 9;
+ output_min_count = 11;
break;
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_HEVC:
@@ -1213,6 +1213,8 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
}
out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
+ /* Max of driver and FW count */
+ out_min_count = max(out_min_count, bufreq->count_min);
bufreq->type = buftype;
bufreq->region_size = 0;
@@ -1237,7 +1239,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = dec_ops->scratch(width, height, is_interlaced);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
- bufreq->size = dec_ops->scratch1(width, height, out_min_count,
+ bufreq->size = dec_ops->scratch1(width, height, VB2_MAX_FRAME,
is_secondary_output,
num_vpp_pipes);
} else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) {
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index f5b4e1f4764f..f16f8962273c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -66,16 +66,3 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return freq;
}
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version)
-{
- const struct hfi_platform *plat;
-
- plat = hfi_platform_get(version);
- if (!plat)
- return 0;
-
- if (plat->num_vpp_pipes)
- return plat->num_vpp_pipes();
-
- return 0;
-}
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index 2dbe608c53af..1dcf4085928c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -52,7 +52,6 @@ struct hfi_platform {
unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
- u8 (*num_vpp_pipes)(void);
int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq);
};
@@ -67,5 +66,4 @@ unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 code
u32 session_type);
unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
u32 session_type);
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index d8243b22568a..c10618e44f5d 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -322,17 +322,11 @@ static unsigned long codec_lp_freq(u32 session_type, u32 codec)
return 0;
}
-static u8 num_vpp_pipes(void)
-{
- return 4;
-}
-
const struct hfi_platform hfi_plat_v6 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
.codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
- .num_vpp_pipes = num_vpp_pipes,
.bufreq = hfi_plat_bufreq_v6,
};
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index ce98c523b3c6..3a75a27632fb 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -551,6 +551,9 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (IS_V6(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
+ if (hdev->core->res->num_vpp_pipes == 1)
+ goto skip_aon_mvp_noc;
+
writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
val,
@@ -560,6 +563,7 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (ret)
return -ETIMEDOUT;
+skip_aon_mvp_noc:
mask_val = (BIT(2) | BIT(1) | BIT(0));
writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 300c6e47e72f..9735a246ce36 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -149,6 +149,8 @@
/* Wrapper TZ 6xx */
#define WRAPPER_TZ_BASE_V6 0x000c0000
#define WRAPPER_TZ_CPU_STATUS_V6 0x10
+#define WRAPPER_TZ_XTSS_SW_RESET 0x1000
+#define WRAPPER_XTSS_SW_RESET_BIT BIT(0)
/* Venus AON */
#define AON_BASE_V6 0x000e0000
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 3e2345eb47f7..cedc664ba755 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -1085,12 +1085,16 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
if (inst->state != INST_START)
return 0;
- if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ?
inst->clk_data.low_power_freq :
inst->clk_data.vpp_freq;
- vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ } else {
+ vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
+ }
+
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
@@ -1139,9 +1143,10 @@ static int load_scale_v4(struct venus_inst *inst)
freq = max(freq_core1, freq_core2);
if (freq > table[0].freq) {
+ dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate: %lu\n",
+ freq, table[0].freq);
+
freq = table[0].freq;
- dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
- freq, table[0].freq);
goto set_freq;
}
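A worked example of the encoder branch in calculate_inst_freq() above, using a hypothetical vpp_freq_per_mb of 1000 cycles per macroblock (the 244800 macroblocks/s load matches the 1080p30 entries in the bandwidth tables; the per-macroblock cost is assumed for illustration only):

	vpp_freq  = 244800 * 1000   = 244,800,000
	vpp_freq += vpp_freq / 20   = 244,800,000 + 12,240,000 = 257,040,000

i.e. the "21 / 20 is overhead factor" comment means the raw VPP clock estimate is raised by 5%.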
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 198e47eb63f4..91da3f509724 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -332,8 +332,11 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
+ inst->output2_buf_size =
+ venus_helper_get_framesz(pixfmt_cap, orig_pixmp.width, orig_pixmp.height);
+ }
return 0;
}
@@ -653,6 +656,19 @@ static int vdec_set_properties(struct venus_inst *inst)
return 0;
}
+static int vdec_set_work_route(struct venus_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE;
+ struct hfi_video_work_route wr;
+
+ if (!IS_V6(inst->core))
+ return 0;
+
+ wr.video_work_route = inst->core->res->num_vpp_pipes;
+
+ return hfi_session_set_property(inst, ptype, &wr);
+}
+
#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
static int vdec_output_conf(struct venus_inst *inst)
@@ -830,6 +846,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int in_num, out_num;
int ret = 0;
@@ -855,6 +872,16 @@ static int vdec_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
ret = vdec_pm_get(inst);
if (ret)
return ret;
@@ -970,23 +997,23 @@ reconfigure:
if (ret)
goto err;
+ venus_pm_load_scale(inst);
+
+ inst->next_buf_last = false;
+
ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto err;
- ret = venus_helper_queue_dpb_bufs(inst);
+ ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
- ret = venus_helper_process_initial_cap_bufs(inst);
+ ret = venus_helper_queue_dpb_bufs(inst);
if (ret)
goto free_dpb_bufs;
- venus_pm_load_scale(inst);
-
- inst->next_buf_last = false;
-
- ret = hfi_session_continue(inst);
+ ret = venus_helper_process_initial_cap_bufs(inst);
if (ret)
goto free_dpb_bufs;
@@ -1039,6 +1066,10 @@ static int vdec_start_output(struct venus_inst *inst)
if (ret)
return ret;
+ ret = vdec_set_work_route(inst);
+ if (ret)
+ return ret;
+
ret = vdec_output_conf(inst);
if (ret)
return ret;
@@ -1178,6 +1209,8 @@ static void vdec_stop_streaming(struct vb2_queue *q)
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
+ inst->session_error = 0;
+
if (ret)
goto unlock;
@@ -1211,7 +1244,7 @@ static void vdec_session_release(struct venus_inst *inst)
ret = hfi_session_deinit(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
abort = 1;
if (abort)
@@ -1306,8 +1339,10 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbuf = venus_helper_find_buf(inst, type, tag);
- if (!vbuf)
+ if (!vbuf) {
+ venus_helper_change_dpb_owner(inst, vbuf, type, buf_type, tag);
return;
+ }
vbuf->flags = flags;
vbuf->field = V4L2_FIELD_NONE;
@@ -1389,6 +1424,11 @@ static void vdec_event_change(struct venus_inst *inst,
inst->crop.height = ev_data->height;
}
+ inst->fw_min_cnt = ev_data->buf_count;
+ /* overwrite this to 11 for VP9 due to a firmware bug */
+ if (inst->hfi_codec == HFI_VIDEO_CODEC_VP9)
+ inst->fw_min_cnt = 11;
+
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
@@ -1448,6 +1488,7 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
switch (event) {
case EVT_SESSION_ERROR:
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "dec: event session error %x\n", inst->error);
break;
case EVT_SYS_EVENT_CHANGE:
@@ -1492,6 +1533,7 @@ static void vdec_inst_init(struct venus_inst *inst)
inst->crop.top = 0;
inst->crop.width = inst->width;
inst->crop.height = inst->height;
+ inst->fw_min_cnt = 8;
inst->out_width = frame_width_min(inst);
inst->out_height = frame_height_min(inst);
inst->fps = 30;
@@ -1568,6 +1610,8 @@ static int vdec_open(struct file *file)
inst->bit_depth = VIDC_BITDEPTH_8;
inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE;
init_waitqueue_head(&inst->reconf_wait);
+ inst->nonblock = file->f_flags & O_NONBLOCK;
+
venus_helper_init_instance(inst);
ret = vdec_ctrl_init(inst);
@@ -1580,6 +1624,8 @@ static int vdec_open(struct file *file)
vdec_inst_init(inst);
+ ida_init(&inst->dpb_ids);
+
/*
* create m2m device for every instance, the m2m context scheduling
* is made by firmware side so we do not need to care about.
@@ -1625,6 +1671,7 @@ static int vdec_close(struct file *file)
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst);
+ ida_destroy(&inst->dpb_ids);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
v4l2_fh_del(&inst->fh);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index bc1c42dd53c0..84bafc3118cc 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -538,6 +538,64 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
+static int venc_pm_get(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+ ret = pm_runtime_resume_and_get(dev);
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_put(struct venus_inst *inst, bool autosuspend)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+
+ if (autosuspend)
+ ret = pm_runtime_put_autosuspend(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_get_put(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret = 0;
+
+ mutex_lock(&core->pm_lock);
+
+ if (pm_runtime_suspended(dev)) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto error;
+
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+error:
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void venc_pm_touch(struct venus_inst *inst)
+{
+ pm_runtime_mark_last_busy(inst->core->dev_enc);
+}
+
static int venc_set_properties(struct venus_inst *inst)
{
struct venc_controls *ctr = &inst->controls.enc;
@@ -908,6 +966,7 @@ static int venc_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int num, min = 4;
int ret;
@@ -931,11 +990,29 @@ static int venc_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
+ ret = venc_pm_get(inst);
+ if (ret)
+ return ret;
+
mutex_lock(&inst->lock);
ret = venc_init_session(inst);
mutex_unlock(&inst->lock);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, false);
+ if (ret)
return ret;
switch (q->type) {
@@ -971,6 +1048,9 @@ static int venc_queue_setup(struct vb2_queue *q,
}
return ret;
+put_power:
+ venc_pm_put(inst, false);
+ return ret;
}
static int venc_buf_init(struct vb2_buffer *vb)
@@ -986,6 +1066,8 @@ static void venc_release_session(struct venus_inst *inst)
{
int ret;
+ venc_pm_get(inst);
+
mutex_lock(&inst->lock);
ret = hfi_session_deinit(inst);
@@ -997,6 +1079,8 @@ static void venc_release_session(struct venus_inst *inst)
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
venus_pm_release_core(inst);
+
+ venc_pm_put(inst, false);
}
static void venc_buf_cleanup(struct vb2_buffer *vb)
@@ -1066,8 +1150,16 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
inst->sequence_cap = 0;
inst->sequence_out = 0;
+ ret = venc_pm_get(inst);
+ if (ret)
+ goto error;
+
ret = venus_pm_acquire_core(inst);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, true);
+ if (ret)
goto error;
ret = venc_set_properties(inst);
@@ -1091,6 +1183,8 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
+put_power:
+ venc_pm_put(inst, false);
error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -1105,6 +1199,8 @@ static void venc_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ venc_pm_get_put(inst);
+
mutex_lock(&inst->lock);
venus_helper_vb2_buf_queue(vb);
mutex_unlock(&inst->lock);
@@ -1128,6 +1224,8 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
struct vb2_buffer *vb;
unsigned int type;
+ venc_pm_touch(inst);
+
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
@@ -1157,8 +1255,11 @@ static void venc_event_notify(struct venus_inst *inst, u32 event,
{
struct device *dev = inst->core->dev_enc;
+ venc_pm_touch(inst);
+
if (event == EVT_SESSION_ERROR) {
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "enc: event session error %x\n", inst->error);
}
}
@@ -1242,16 +1343,13 @@ static int venc_open(struct file *file)
inst->session_type = VIDC_SESSION_TYPE_ENC;
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
+ inst->nonblock = file->f_flags & O_NONBLOCK;
venus_helper_init_instance(inst);
- ret = pm_runtime_resume_and_get(core->dev_enc);
- if (ret < 0)
- goto err_free;
-
ret = venc_ctrl_init(inst);
if (ret)
- goto err_put_sync;
+ goto err_free;
ret = hfi_session_create(inst, &venc_hfi_ops);
if (ret)
@@ -1290,8 +1388,6 @@ err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
venc_ctrl_deinit(inst);
-err_put_sync:
- pm_runtime_put_sync(core->dev_enc);
err_free:
kfree(inst);
return ret;
@@ -1301,6 +1397,8 @@ static int venc_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
+ venc_pm_get(inst);
+
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
venc_ctrl_deinit(inst);
@@ -1309,7 +1407,7 @@ static int venc_close(struct file *file)
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
- pm_runtime_put_sync(inst->core->dev_enc);
+ venc_pm_put(inst, false);
kfree(inst);
return 0;
@@ -1366,6 +1464,8 @@ static int venc_probe(struct platform_device *pdev)
core->dev_enc = dev;
video_set_drvdata(vdev, core);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/media/platform/rcar-isp.c b/drivers/media/platform/rcar-isp.c
new file mode 100644
index 000000000000..2ffab30bc011
--- /dev/null
+++ b/drivers/media/platform/rcar-isp.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Driver for Renesas R-Car ISP Channel Selector
+ *
+ * The ISP hardware is capable of more than just channel selection; features
+ * such as demosaicing, white balance control and color space conversion are
+ * also possible. These more advanced features are not supported by the driver
+ * due to lack of documentation.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-subdev.h>
+
+#define ISPINPUTSEL0_REG 0x0008
+#define ISPINPUTSEL0_SEL_CSI0 BIT(31)
+
+#define ISPSTART_REG 0x0014
+#define ISPSTART_START 0xffff
+#define ISPSTART_STOP 0x0000
+
+#define ISPPROCMODE_DT_REG(n) (0x1100 + (0x4 * (n)))
+#define ISPPROCMODE_DT_PROC_MODE_VC3(pm) (((pm) & 0x3f) << 24)
+#define ISPPROCMODE_DT_PROC_MODE_VC2(pm) (((pm) & 0x3f) << 16)
+#define ISPPROCMODE_DT_PROC_MODE_VC1(pm) (((pm) & 0x3f) << 8)
+#define ISPPROCMODE_DT_PROC_MODE_VC0(pm) ((pm) & 0x3f)
+
+#define ISPCS_FILTER_ID_CH_REG(n) (0x3000 + (0x0100 * (n)))
+
+#define ISPCS_DT_CODE03_CH_REG(n) (0x3008 + (0x100 * (n)))
+#define ISPCS_DT_CODE03_EN3 BIT(31)
+#define ISPCS_DT_CODE03_DT3(dt) (((dt) & 0x3f) << 24)
+#define ISPCS_DT_CODE03_EN2 BIT(23)
+#define ISPCS_DT_CODE03_DT2(dt) (((dt) & 0x3f) << 16)
+#define ISPCS_DT_CODE03_EN1 BIT(15)
+#define ISPCS_DT_CODE03_DT1(dt) (((dt) & 0x3f) << 8)
+#define ISPCS_DT_CODE03_EN0 BIT(7)
+#define ISPCS_DT_CODE03_DT0(dt) ((dt) & 0x3f)
+
+struct rcar_isp_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int procmode;
+};
+
+static const struct rcar_isp_format rcar_isp_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .procmode = 0x15 },
+ { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .procmode = 0x10 },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .procmode = 0x0c },
+};
+
+static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_isp_formats); i++) {
+ if (rcar_isp_formats[i].code == code)
+ return &rcar_isp_formats[i];
+ }
+
+ return NULL;
+}
+
+enum rcar_isp_input {
+ RISP_CSI_INPUT0,
+ RISP_CSI_INPUT1,
+};
+
+enum rcar_isp_pads {
+ RCAR_ISP_SINK,
+ RCAR_ISP_PORT0,
+ RCAR_ISP_PORT1,
+ RCAR_ISP_PORT2,
+ RCAR_ISP_PORT3,
+ RCAR_ISP_PORT4,
+ RCAR_ISP_PORT5,
+ RCAR_ISP_PORT6,
+ RCAR_ISP_PORT7,
+ RCAR_ISP_NUM_PADS,
+};
+
+struct rcar_isp {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rstc;
+
+ enum rcar_isp_input csi_input;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RCAR_ISP_NUM_PADS];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+
+ struct mutex lock; /* Protects mf and stream_count. */
+ struct v4l2_mbus_framefmt mf;
+ int stream_count;
+};
+
+static inline struct rcar_isp *sd_to_isp(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_isp, subdev);
+}
+
+static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_isp, notifier);
+}
+
+static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+{
+ iowrite32(value, isp->base + offset);
+}
+
+static u32 risp_read(struct rcar_isp *isp, u32 offset)
+{
+ return ioread32(isp->base + offset);
+}
+
+static int risp_power_on(struct rcar_isp *isp)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isp->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_control_deassert(isp->rstc);
+ if (ret < 0) {
+ pm_runtime_put(isp->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void risp_power_off(struct rcar_isp *isp)
+{
+ reset_control_assert(isp->rstc);
+ pm_runtime_put(isp->dev);
+}
+
+static int risp_start(struct rcar_isp *isp)
+{
+ const struct rcar_isp_format *format;
+ unsigned int vc;
+ u32 sel_csi = 0;
+ int ret;
+
+ format = risp_code_to_fmt(isp->mf.code);
+ if (!format) {
+ dev_err(isp->dev, "Unsupported bus format\n");
+ return -EINVAL;
+ }
+
+ ret = risp_power_on(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power on ISP\n");
+ return ret;
+ }
+
+ /* Select CSI-2 input source. */
+ if (isp->csi_input == RISP_CSI_INPUT1)
+ sel_csi = ISPINPUTSEL0_SEL_CSI0;
+
+ risp_write(isp, ISPINPUTSEL0_REG,
+ risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+
+ /* Configure Channel Selector. */
+ for (vc = 0; vc < 4; vc++) {
+ u8 ch = vc + 4;
+ u8 dt = format->datatype;
+
+ risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ }
+
+ /* Setup processing method. */
+ risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+
+ /* Start ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_START);
+
+ ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ if (ret)
+ risp_power_off(isp);
+
+ return ret;
+}
+
+static void risp_stop(struct rcar_isp *isp)
+{
+ v4l2_subdev_call(isp->remote, video, s_stream, 0);
+
+ /* Stop ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+
+ risp_power_off(isp);
+}
+
+static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ int ret = 0;
+
+ mutex_lock(&isp->lock);
+
+ if (!isp->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (enable && isp->stream_count == 0) {
+ ret = risp_start(isp);
+ if (ret)
+ goto out;
+ } else if (!enable && isp->stream_count == 1) {
+ risp_stop(isp);
+ }
+
+ isp->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&isp->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops risp_video_ops = {
+ .s_stream = risp_s_stream,
+};
+
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&isp->lock);
+
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isp->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *framefmt = format->format;
+ }
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static int risp_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+
+ mutex_lock(&isp->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = isp->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .set_fmt = risp_set_pad_format,
+ .get_fmt = risp_get_pad_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
+ .video = &risp_video_ops,
+ .pad = &risp_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links
+ */
+
+static int risp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(isp->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ isp->remote = subdev;
+
+ dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &isp->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void risp_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+
+ isp->remote = NULL;
+
+ dev_dbg(isp->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations risp_notify_ops = {
+ .bound = risp_notify_bound,
+ .unbind = risp_notify_unbind,
+};
+
+static int risp_parse_dt(struct rcar_isp *isp)
+{
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ unsigned int id;
+ int ret;
+
+ for (id = 0; id < 2; id++) {
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev),
+ 0, id, 0);
+ if (ep)
+ break;
+ }
+
+ if (!ep) {
+ dev_err(isp->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ if (id == 1)
+ isp->csi_input = RISP_CSI_INPUT1;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
+ v4l2_async_nf_init(&isp->notifier);
+ isp->notifier.ops = &risp_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode,
+ struct v4l2_async_subdev);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ ret = v4l2_async_subdev_nf_register(&isp->subdev, &isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct media_entity_operations risp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int risp_probe_resources(struct rcar_isp *isp,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(isp->base))
+ return PTR_ERR(isp->base);
+
+ isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
+
+ return PTR_ERR_OR_ZERO(isp->rstc);
+}
+
+static const struct of_device_id risp_of_id_table[] = {
+ { .compatible = "renesas,r8a779a0-isp" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, risp_of_id_table);
+
+static int risp_probe(struct platform_device *pdev)
+{
+ struct rcar_isp *isp;
+ unsigned int i;
+ int ret;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = &pdev->dev;
+
+ mutex_init(&isp->lock);
+
+ ret = risp_probe_resources(isp, pdev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to get resources\n");
+ goto error_mutex;
+ }
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = risp_parse_dt(isp);
+ if (ret)
+ goto error_pm;
+
+ isp->subdev.owner = THIS_MODULE;
+ isp->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
+ v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
+ snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ isp->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ isp->subdev.entity.ops = &risp_entity_ops;
+
+ isp->pads[RCAR_ISP_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_ISP_PORT0; i < RCAR_ISP_NUM_PADS; i++)
+ isp->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&isp->subdev.entity, RCAR_ISP_NUM_PADS,
+ isp->pads);
+ if (ret)
+ goto error_notifier;
+
+ ret = v4l2_async_register_subdev(&isp->subdev);
+ if (ret < 0)
+ goto error_notifier;
+
+ dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
+
+ return 0;
+error_notifier:
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+error_pm:
+ pm_runtime_disable(&pdev->dev);
+error_mutex:
+ mutex_destroy(&isp->lock);
+
+ return ret;
+}
+
+static int risp_remove(struct platform_device *pdev)
+{
+ struct rcar_isp *isp = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ v4l2_async_unregister_subdev(&isp->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&isp->lock);
+
+ return 0;
+}
+
+static struct platform_driver rcar_isp_driver = {
+ .driver = {
+ .name = "rcar-isp",
+ .of_match_table = risp_of_id_table,
+ },
+ .probe = risp_probe,
+ .remove = risp_remove,
+};
+
+module_platform_driver(rcar_isp_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car ISP Channel Selector driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 33957cc9118c..1d92cc8ede8f 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -45,188 +45,7 @@
#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
/* -----------------------------------------------------------------------------
- * Media Controller link notification
- */
-
-/* group lock should be held when calling this function. */
-static int rvin_group_entity_to_csi_id(struct rvin_group *group,
- struct media_entity *entity)
-{
- struct v4l2_subdev *sd;
- unsigned int i;
-
- sd = media_entity_to_v4l2_subdev(entity);
-
- for (i = 0; i < RVIN_CSI_MAX; i++)
- if (group->csi[i].subdev == sd)
- return i;
-
- return -ENODEV;
-}
-
-static unsigned int rvin_group_get_mask(struct rvin_dev *vin,
- enum rvin_csi_id csi_id,
- unsigned char channel)
-{
- const struct rvin_group_route *route;
- unsigned int mask = 0;
-
- for (route = vin->info->routes; route->mask; route++) {
- if (route->vin == vin->id &&
- route->csi == csi_id &&
- route->channel == channel) {
- vin_dbg(vin,
- "Adding route: vin: %d csi: %d channel: %d\n",
- route->vin, route->csi, route->channel);
- mask |= route->mask;
- }
- }
-
- return mask;
-}
-
-/*
- * Link setup for the links between a VIN and a CSI-2 receiver is a bit
- * complex. The reason for this is that the register controlling routing
- * is not present in each VIN instance. There are special VINs which
- * control routing for themselves and other VINs. There are not many
- * different possible links combinations that can be enabled at the same
- * time, therefor all already enabled links which are controlled by a
- * master VIN need to be taken into account when making the decision
- * if a new link can be enabled or not.
- *
- * 1. Find out which VIN the link the user tries to enable is connected to.
- * 2. Lookup which master VIN controls the links for this VIN.
- * 3. Start with a bitmask with all bits set.
- * 4. For each previously enabled link from the master VIN bitwise AND its
- * route mask (see documentation for mask in struct rvin_group_route)
- * with the bitmask.
- * 5. Bitwise AND the mask for the link the user tries to enable to the bitmask.
- * 6. If the bitmask is not empty at this point the new link can be enabled
- * while keeping all previous links enabled. Update the CHSEL value of the
- * master VIN and inform the user that the link could be enabled.
- *
- * Please note that no link can be enabled if any VIN in the group is
- * currently open.
- */
-static int rvin_group_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct rvin_group *group = container_of(link->graph_obj.mdev,
- struct rvin_group, mdev);
- unsigned int master_id, channel, mask_new, i;
- unsigned int mask = ~0;
- struct media_entity *entity;
- struct video_device *vdev;
- struct media_pad *csi_pad;
- struct rvin_dev *vin = NULL;
- int csi_id, ret;
-
- ret = v4l2_pipeline_link_notify(link, flags, notification);
- if (ret)
- return ret;
-
- /* Only care about link enablement for VIN nodes. */
- if (!(flags & MEDIA_LNK_FL_ENABLED) ||
- !is_media_entity_v4l2_video_device(link->sink->entity))
- return 0;
-
- /*
- * Don't allow link changes if any entity in the graph is
- * streaming, modifying the CHSEL register fields can disrupt
- * running streams.
- */
- media_device_for_each_entity(entity, &group->mdev)
- if (entity->stream_count)
- return -EBUSY;
-
- mutex_lock(&group->lock);
-
- /* Find the master VIN that controls the routes. */
- vdev = media_entity_to_video_device(link->sink->entity);
- vin = container_of(vdev, struct rvin_dev, vdev);
- master_id = rvin_group_id_to_master(vin->id);
-
- if (WARN_ON(!group->vin[master_id])) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Build a mask for already enabled links. */
- for (i = master_id; i < master_id + 4; i++) {
- if (!group->vin[i])
- continue;
-
- /* Get remote CSI-2, if any. */
- csi_pad = media_entity_remote_pad(
- &group->vin[i]->vdev.entity.pads[0]);
- if (!csi_pad)
- continue;
-
- csi_id = rvin_group_entity_to_csi_id(group, csi_pad->entity);
- channel = rvin_group_csi_pad_to_channel(csi_pad->index);
-
- mask &= rvin_group_get_mask(group->vin[i], csi_id, channel);
- }
-
- /* Add the new link to the existing mask and check if it works. */
- csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
-
- if (csi_id == -ENODEV) {
- struct v4l2_subdev *sd;
-
- /*
- * Make sure the source entity subdevice is registered as
- * a parallel input of one of the enabled VINs if it is not
- * one of the CSI-2 subdevices.
- *
- * No hardware configuration required for parallel inputs,
- * we can return here.
- */
- sd = media_entity_to_v4l2_subdev(link->source->entity);
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (group->vin[i] &&
- group->vin[i]->parallel.subdev == sd) {
- group->vin[i]->is_csi = false;
- ret = 0;
- goto out;
- }
- }
-
- vin_err(vin, "Subdevice %s not registered to any VIN\n",
- link->source->entity->name);
- ret = -ENODEV;
- goto out;
- }
-
- channel = rvin_group_csi_pad_to_channel(link->source->index);
- mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
- vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
-
- if (!mask_new) {
- ret = -EMLINK;
- goto out;
- }
-
- /* New valid CHSEL found, set the new value. */
- ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
- if (ret)
- goto out;
-
- vin->is_csi = true;
-
-out:
- mutex_unlock(&group->lock);
-
- return ret;
-}
-
-static const struct media_device_ops rvin_media_ops = {
- .link_notify = rvin_group_link_notify,
-};
-
-/* -----------------------------------------------------------------------------
- * Gen3 CSI2 Group Allocator
+ * Gen3 Group Allocator
*/
/* FIXME: This should if we find a system that supports more
@@ -247,7 +66,9 @@ static void rvin_group_cleanup(struct rvin_group *group)
mutex_destroy(&group->lock);
}
-static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
+static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct media_device *mdev = &group->mdev;
const struct of_device_id *match;
@@ -263,8 +84,10 @@ static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
vin_dbg(vin, "found %u enabled VIN's in DT", group->count);
+ group->link_setup = link_setup;
+
mdev->dev = vin->dev;
- mdev->ops = &rvin_media_ops;
+ mdev->ops = ops;
match = of_match_node(vin->dev->driver->of_match_table,
vin->dev->of_node);
@@ -295,7 +118,9 @@ static void rvin_group_release(struct kref *kref)
mutex_unlock(&rvin_group_lock);
}
-static int rvin_group_get(struct rvin_dev *vin)
+static int rvin_group_get(struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct rvin_group *group;
u32 id;
@@ -327,7 +152,7 @@ static int rvin_group_get(struct rvin_dev *vin)
goto err_group;
}
- ret = rvin_group_init(group, vin);
+ ret = rvin_group_init(group, vin, link_setup, ops);
if (ret) {
kfree(group);
vin_err(vin, "Failed to initialize group\n");
@@ -383,6 +208,213 @@ out:
kref_put(&group->refcount, rvin_group_release);
}
+/* group lock should be held when calling this function. */
+static int rvin_group_entity_to_remote_id(struct rvin_group *group,
+ struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ for (i = 0; i < RVIN_REMOTES_MAX; i++)
+ if (group->remotes[i].subdev == sd)
+ return i;
+
+ return -ENODEV;
+}
+
+static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+ int ret;
+
+ ret = media_device_register(&vin->group->mdev);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ /* Register all video nodes for the group. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
+ ret = rvin_v4l2_register(vin->group->vin[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return vin->group->link_setup(vin);
+}
+
+static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ rvin_v4l2_unregister(vin->group->vin[i]);
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = NULL;
+ vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ media_device_unregister(&vin->group->mdev);
+}
+
+static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = subdev;
+ vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
+ .bound = rvin_group_notify_bound,
+ .unbind = rvin_group_notify_unbind,
+ .complete = rvin_group_notify_complete,
+};
+
+static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
+ unsigned int id)
+{
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ asd = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out;
+ }
+
+ vin->group->remotes[vep.base.id].asd = asd;
+
+ vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
+ to_of_node(fwnode), vep.base.id);
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
+{
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_nf_unregister(&vin->group->notifier);
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+}
+
+static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
+ unsigned int max_id)
+{
+ unsigned int count = 0, vin_mask = 0;
+ unsigned int i, id;
+ int ret;
+
+ mutex_lock(&vin->group->lock);
+
+ /* If not all VINs are registered, don't register the notifier. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i]) {
+ count++;
+ vin_mask |= BIT(i);
+ }
+ }
+
+ if (vin->group->count != count) {
+ mutex_unlock(&vin->group->lock);
+ return 0;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ v4l2_async_nf_init(&vin->group->notifier);
+
+ /*
+ * Some subdevices may overlap, but the parser function can handle it and
+ * each subdevice will only be registered once with the group notifier.
+ */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!(vin_mask & BIT(i)))
+ continue;
+
+ for (id = 0; id < max_id; id++) {
+ if (vin->group->remotes[id].asd)
+ continue;
+
+ ret = rvin_group_parse_of(vin->group->vin[i], port, id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (list_empty(&vin->group->notifier.asd_list))
+ return 0;
+
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->group->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Controls
*/
@@ -405,6 +437,45 @@ static const struct v4l2_ctrl_ops rvin_ctrl_ops = {
.s_ctrl = rvin_s_ctrl,
};
+static void rvin_free_controls(struct rvin_dev *vin)
+{
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+}
+
+static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev)
+{
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ /* The VIN directly deals with alpha component. */
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ rvin_free_controls(vin);
+ return ret;
+ }
+
+ /* For the non-MC mode add controls from the subdevice. */
+ if (subdev) {
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler,
+ subdev->ctrl_handler, NULL, true);
+ if (ret < 0) {
+ rvin_free_controls(vin);
+ return ret;
+ }
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Async notifier
*/
@@ -490,28 +561,10 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
return ret;
/* Add the controls */
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ ret = rvin_create_controls(vin, subdev);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
-
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
- NULL, true);
- if (ret < 0) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
-
vin->parallel.subdev = subdev;
return 0;
@@ -522,10 +575,8 @@ static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
rvin_v4l2_unregister(vin);
vin->parallel.subdev = NULL;
- if (!vin->info->use_mc) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- vin->vdev.ctrl_handler = NULL;
- }
+ if (!vin->info->use_mc)
+ rvin_free_controls(vin);
}
static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
@@ -641,8 +692,8 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
goto out;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -657,28 +708,33 @@ out:
return ret;
}
+static void rvin_parallel_cleanup(struct rvin_dev *vin)
+{
+ v4l2_async_nf_unregister(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
+}
+
static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- v4l2_async_notifier_init(&vin->notifier);
+ v4l2_async_nf_init(&vin->notifier);
ret = rvin_parallel_parse_of(vin);
if (ret)
return ret;
- /* If using mc, it's fine not to have any input registered. */
if (!vin->parallel.asd)
- return vin->info->use_mc ? 0 : -ENODEV;
+ return -ENODEV;
vin_dbg(vin, "Found parallel subdevice %pOF\n",
to_of_node(vin->parallel.asd->match.fwnode));
vin->notifier.ops = &rvin_parallel_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
return ret;
}
@@ -686,36 +742,175 @@ static int rvin_parallel_init(struct rvin_dev *vin)
}
/* -----------------------------------------------------------------------------
- * Group async notifier
+ * CSI-2
*/
-static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+static unsigned int rvin_csi2_get_mask(struct rvin_dev *vin,
+ enum rvin_csi_id csi_id,
+ unsigned char channel)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
const struct rvin_group_route *route;
- unsigned int i;
- int ret;
+ unsigned int mask = 0;
- ret = media_device_register(&vin->group->mdev);
+ for (route = vin->info->routes; route->mask; route++) {
+ if (route->vin == vin->id &&
+ route->csi == csi_id &&
+ route->channel == channel) {
+ vin_dbg(vin,
+ "Adding route: vin: %d csi: %d channel: %d\n",
+ route->vin, route->csi, route->channel);
+ mask |= route->mask;
+ }
+ }
+
+ return mask;
+}
+
+/*
+ * Link setup for the links between a VIN and a CSI-2 receiver is a bit
+ * complex. The reason for this is that the register controlling routing
+ * is not present in each VIN instance. There are special VINs which
+ * control routing for themselves and other VINs. There are not many
+ * different possible link combinations that can be enabled at the same
+ * time, therefore all already enabled links which are controlled by a
+ * master VIN need to be taken into account when deciding whether a new
+ * link can be enabled.
+ *
+ * 1. Find out which VIN the link the user tries to enable is connected to.
+ * 2. Look up which master VIN controls the links for this VIN.
+ * 3. Start with a bitmask with all bits set.
+ * 4. For each previously enabled link from the master VIN bitwise AND its
+ * route mask (see documentation for mask in struct rvin_group_route)
+ * with the bitmask.
+ * 5. Bitwise AND the mask for the link the user tries to enable with the bitmask.
+ * 6. If the bitmask is not empty at this point the new link can be enabled
+ * while keeping all previous links enabled. Update the CHSEL value of the
+ * master VIN and inform the user that the link could be enabled.
+ *
+ * Please note that no link can be enabled if any VIN in the group is
+ * currently open.
+ */
+static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct rvin_group *group = container_of(link->graph_obj.mdev,
+ struct rvin_group, mdev);
+ unsigned int master_id, channel, mask_new, i;
+ unsigned int mask = ~0;
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct media_pad *csi_pad;
+ struct rvin_dev *vin = NULL;
+ int csi_id, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
if (ret)
return ret;
- ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
- if (ret) {
- vin_err(vin, "Failed to register subdev nodes\n");
- return ret;
+ /* Only care about link enablement for VIN nodes. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED) ||
+ !is_media_entity_v4l2_video_device(link->sink->entity))
+ return 0;
+
+ /*
+ * Don't allow link changes if any entity in the graph is
+ * streaming, as modifying the CHSEL register fields can disrupt
+ * running streams.
+ */
+ media_device_for_each_entity(entity, &group->mdev)
+ if (entity->stream_count)
+ return -EBUSY;
+
+ mutex_lock(&group->lock);
+
+ /* Find the master VIN that controls the routes. */
+ vdev = media_entity_to_video_device(link->sink->entity);
+ vin = container_of(vdev, struct rvin_dev, vdev);
+ master_id = rvin_group_id_to_master(vin->id);
+
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
}
- /* Register all video nodes for the group. */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i] &&
- !video_is_registered(&vin->group->vin[i]->vdev)) {
- ret = rvin_v4l2_register(vin->group->vin[i]);
- if (ret)
- return ret;
+ /* Build a mask for already enabled links. */
+ for (i = master_id; i < master_id + 4; i++) {
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_entity_remote_pad(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ csi_id = rvin_group_entity_to_remote_id(group, csi_pad->entity);
+ channel = rvin_group_csi_pad_to_channel(csi_pad->index);
+
+ mask &= rvin_csi2_get_mask(group->vin[i], csi_id, channel);
+ }
+
+ /* Add the new link to the existing mask and check if it works. */
+ csi_id = rvin_group_entity_to_remote_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+
+ /*
+ * Make sure the source entity subdevice is registered as
+ * a parallel input of one of the enabled VINs if it is not
+ * one of the CSI-2 subdevices.
+ *
+ * No hardware configuration required for parallel inputs,
+ * we can return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] &&
+ group->vin[i]->parallel.subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
}
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
}
+ channel = rvin_group_csi_pad_to_channel(link->source->index);
+ mask_new = mask & rvin_csi2_get_mask(vin, csi_id, channel);
+ vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
+
+ if (!mask_new) {
+ ret = -EMLINK;
+ goto out;
+ }
+
+ /* New valid CHSEL found, set the new value. */
+ ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
+out:
+ mutex_unlock(&group->lock);
+
+ return ret;
+}
+
+static const struct media_device_ops rvin_csi2_media_ops = {
+ .link_notify = rvin_csi2_link_notify,
+};
+
+static int rvin_csi2_setup_links(struct rvin_dev *vin)
+{
+ const struct rvin_group_route *route;
+ int ret = -EINVAL;
+
/* Create all media device links between VINs and CSI-2's. */
mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
@@ -732,10 +927,10 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
continue;
/* Check that CSI-2 is part of the group. */
- if (!vin->group->csi[route->csi].subdev)
+ if (!vin->group->remotes[route->csi].subdev)
continue;
- source = &vin->group->csi[route->csi].subdev->entity;
+ source = &vin->group->remotes[route->csi].subdev->entity;
source_idx = rvin_group_csi_channel_to_pad(route->channel);
source_pad = &source->pads[source_idx];
@@ -758,167 +953,107 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
-static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- for (i = 0; i < RCAR_VIN_NUM; i++)
- if (vin->group->vin[i])
- rvin_v4l2_unregister(vin->group->vin[i]);
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = NULL;
- vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- media_device_unregister(&vin->group->mdev);
-}
-
-static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void rvin_csi2_cleanup(struct rvin_dev *vin)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = subdev;
- vin_dbg(vin, "Bound CSI-2 %s to slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- return 0;
+ rvin_parallel_cleanup(vin);
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
- .bound = rvin_group_notify_bound,
- .unbind = rvin_group_notify_unbind,
- .complete = rvin_group_notify_complete,
-};
-
-static int rvin_mc_parse_of(struct rvin_dev *vin, unsigned int id)
+static int rvin_csi2_init(struct rvin_dev *vin)
{
- struct fwnode_handle *ep, *fwnode;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY,
- };
- struct v4l2_async_subdev *asd;
int ret;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 1, id, 0);
- if (!ep)
- return 0;
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
- fwnode = fwnode_graph_get_remote_endpoint(ep);
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- fwnode_handle_put(ep);
- if (ret) {
- vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
- ret = -EINVAL;
- goto out;
- }
+ ret = rvin_create_controls(vin, NULL);
+ if (ret < 0)
+ return ret;
- if (!of_device_is_available(to_of_node(fwnode))) {
- vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
- to_of_node(fwnode));
- ret = -ENOTCONN;
- goto out;
- }
+ ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops);
+ if (ret)
+ goto err_controls;
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->group->notifier,
- fwnode,
- struct v4l2_async_subdev);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
- goto out;
- }
+ /* It's OK to not have a parallel subdevice. */
+ ret = rvin_parallel_init(vin);
+ if (ret && ret != -ENODEV)
+ goto err_group;
- vin->group->csi[vep.base.id].asd = asd;
+ ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX);
+ if (ret)
+ goto err_parallel;
- vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
- to_of_node(fwnode), vep.base.id);
-out:
- fwnode_handle_put(fwnode);
+ return 0;
+err_parallel:
+ rvin_parallel_cleanup(vin);
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
-static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
+/* -----------------------------------------------------------------------------
+ * ISP
+ */
+
+static int rvin_isp_setup_links(struct rvin_dev *vin)
{
- unsigned int count = 0, vin_mask = 0;
- unsigned int i, id;
- int ret;
+ unsigned int i;
+ int ret = -EINVAL;
+ /* Create all media device links between VINs and ISP's. */
mutex_lock(&vin->group->lock);
-
- /* If not all VIN's are registered don't register the notifier. */
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i]) {
- count++;
- vin_mask |= BIT(i);
- }
- }
+ struct media_pad *source_pad, *sink_pad;
+ struct media_entity *source, *sink;
+ unsigned int source_slot = i / 8;
+ unsigned int source_idx = i % 8 + 1;
- if (vin->group->count != count) {
- mutex_unlock(&vin->group->lock);
- return 0;
- }
+ if (!vin->group->vin[i])
+ continue;
- mutex_unlock(&vin->group->lock);
+ /* Check that ISP is part of the group. */
+ if (!vin->group->remotes[source_slot].subdev)
+ continue;
- v4l2_async_notifier_init(&vin->group->notifier);
+ source = &vin->group->remotes[source_slot].subdev->entity;
+ source_pad = &source->pads[source_idx];
- /*
- * Have all VIN's look for CSI-2 subdevices. Some subdevices will
- * overlap but the parser function can handle it, so each subdevice
- * will only be registered once with the group notifier.
- */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (!(vin_mask & BIT(i)))
- continue;
+ sink = &vin->group->vin[i]->vdev.entity;
+ sink_pad = &sink->pads[0];
- for (id = 0; id < RVIN_CSI_MAX; id++) {
- if (vin->group->csi[id].asd)
- continue;
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
- ret = rvin_mc_parse_of(vin->group->vin[i], id);
- if (ret)
- return ret;
+ ret = media_create_pad_link(source, source_idx, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ vin_err(vin, "Error adding link from %s to %s\n",
+ source->name, sink->name);
+ break;
}
}
+ mutex_unlock(&vin->group->lock);
- if (list_empty(&vin->group->notifier.asd_list))
- return 0;
-
- vin->group->notifier.ops = &rvin_group_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev,
- &vin->group->notifier);
- if (ret < 0) {
- vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- return ret;
- }
+ return ret;
+}
- return 0;
+static void rvin_isp_cleanup(struct rvin_dev *vin)
+{
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static int rvin_mc_init(struct rvin_dev *vin)
+static int rvin_isp_init(struct rvin_dev *vin)
{
int ret;
@@ -927,28 +1062,23 @@ static int rvin_mc_init(struct rvin_dev *vin)
if (ret)
return ret;
- ret = rvin_group_get(vin);
- if (ret)
- return ret;
-
- ret = rvin_mc_parse_of_graph(vin);
- if (ret)
- rvin_group_put(vin);
-
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 1);
+ ret = rvin_create_controls(vin, NULL);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+ ret = rvin_group_get(vin, rvin_isp_setup_links, NULL);
+ if (ret)
+ goto err_controls;
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
+ ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX);
+ if (ret)
+ goto err_group;
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
+ return 0;
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
@@ -1325,6 +1455,15 @@ static const struct rvin_info rcar_info_r8a77995 = {
.routes = rcar_info_r8a77995_routes,
};
+static const struct rvin_info rcar_info_r8a779a0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
static const struct of_device_id rvin_of_id_table[] = {
{
.compatible = "renesas,vin-r8a774a1",
@@ -1386,6 +1525,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.compatible = "renesas,vin-r8a77995",
.data = &rcar_info_r8a77995,
},
+ {
+ .compatible = "renesas,vin-r8a779a0",
+ .data = &rcar_info_r8a779a0,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
@@ -1434,38 +1577,22 @@ static int rcar_vin_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vin);
- if (vin->info->use_mc) {
- ret = rvin_mc_init(vin);
- if (ret)
- goto error_dma_unregister;
- }
+ if (vin->info->use_isp)
+ ret = rvin_isp_init(vin);
+ else if (vin->info->use_mc)
+ ret = rvin_csi2_init(vin);
+ else
+ ret = rvin_parallel_init(vin);
- ret = rvin_parallel_init(vin);
- if (ret)
- goto error_group_unregister;
+ if (ret) {
+ rvin_dma_unregister(vin);
+ return ret;
+ }
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
-
-error_group_unregister:
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
-
- if (vin->info->use_mc) {
- mutex_lock(&vin->group->lock);
- if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- }
- mutex_unlock(&vin->group->lock);
- rvin_group_put(vin);
- }
-
-error_dma_unregister:
- rvin_dma_unregister(vin);
-
- return ret;
}
static int rcar_vin_remove(struct platform_device *pdev)
@@ -1476,16 +1603,12 @@ static int rcar_vin_remove(struct platform_device *pdev)
rvin_v4l2_unregister(vin);
- v4l2_async_notifier_unregister(&vin->notifier);
- v4l2_async_notifier_cleanup(&vin->notifier);
-
- if (vin->info->use_mc) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- rvin_group_put(vin);
- }
-
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ if (vin->info->use_isp)
+ rvin_isp_cleanup(vin);
+ else if (vin->info->use_mc)
+ rvin_csi2_cleanup(vin);
+ else
+ rvin_parallel_cleanup(vin);
rvin_dma_unregister(vin);
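
The routing comment above rvin_csi2_link_notify() reduces to a bitmask
intersection. A minimal standalone sketch of that calculation follows; the
helper name pick_chsel() and its array argument are illustrative only and are
not part of the patch:

/* __ffs() comes from <linux/bitops.h>. */
static int pick_chsel(const unsigned int *enabled_masks, unsigned int n,
		      unsigned int new_mask)
{
	unsigned int mask = ~0U;	/* Step 3: start with all bits set. */
	unsigned int i;

	/* Step 4: AND in the route mask of every already enabled link. */
	for (i = 0; i < n; i++)
		mask &= enabled_masks[i];

	/* Step 5: AND in the mask of the link being enabled. */
	mask &= new_mask;
	if (!mask)
		return -EMLINK;	/* No CHSEL value satisfies all links. */

	/* Step 6: the lowest surviving bit is the CHSEL value to program. */
	return __ffs(mask);
}

For example, with one enabled link whose route mask is 0x06 and a new link
whose mask is 0x0c, the intersection is 0x04 and CHSEL 2 keeps both links
working.
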
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index e28eff039688..11848d0c4a55 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -126,6 +126,12 @@ struct rcar_csi2;
#define PHTW_CWEN BIT(8)
#define PHTW_TESTDIN_CODE(n) ((n & 0xff))
+#define PHYFRX_REG 0x64
+#define PHYFRX_FORCERX_MODE_3 BIT(3)
+#define PHYFRX_FORCERX_MODE_2 BIT(2)
+#define PHYFRX_FORCERX_MODE_1 BIT(1)
+#define PHYFRX_FORCERX_MODE_0 BIT(0)
+
struct phtw_value {
u16 data;
u16 code;
@@ -136,6 +142,31 @@ struct rcsi2_mbps_reg {
u16 reg;
};
+static const struct rcsi2_mbps_reg phtw_mbps_v3u[] = {
+ { .mbps = 1500, .reg = 0xcc },
+ { .mbps = 1550, .reg = 0x1d },
+ { .mbps = 1600, .reg = 0x27 },
+ { .mbps = 1650, .reg = 0x30 },
+ { .mbps = 1700, .reg = 0x39 },
+ { .mbps = 1750, .reg = 0x42 },
+ { .mbps = 1800, .reg = 0x4b },
+ { .mbps = 1850, .reg = 0x55 },
+ { .mbps = 1900, .reg = 0x5e },
+ { .mbps = 1950, .reg = 0x67 },
+ { .mbps = 2000, .reg = 0x71 },
+ { .mbps = 2050, .reg = 0x79 },
+ { .mbps = 2100, .reg = 0x83 },
+ { .mbps = 2150, .reg = 0x8c },
+ { .mbps = 2200, .reg = 0x95 },
+ { .mbps = 2250, .reg = 0x9e },
+ { .mbps = 2300, .reg = 0xa7 },
+ { .mbps = 2350, .reg = 0xb0 },
+ { .mbps = 2400, .reg = 0xba },
+ { .mbps = 2450, .reg = 0xc3 },
+ { .mbps = 2500, .reg = 0xcc },
+ { /* sentinel */ },
+};
+
static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x86 },
{ .mbps = 90, .reg = 0x86 },
@@ -200,6 +231,72 @@ static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
#define PHYPLL_REG 0x68
#define PHYPLL_HSFREQRANGE(n) ((n) << 16)
+static const struct rcsi2_mbps_reg hsfreqrange_v3u[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { .mbps = 1550, .reg = 0x3d },
+ { .mbps = 1600, .reg = 0x0d },
+ { .mbps = 1650, .reg = 0x1d },
+ { .mbps = 1700, .reg = 0x2e },
+ { .mbps = 1750, .reg = 0x3e },
+ { .mbps = 1800, .reg = 0x0e },
+ { .mbps = 1850, .reg = 0x1e },
+ { .mbps = 1900, .reg = 0x2f },
+ { .mbps = 1950, .reg = 0x3f },
+ { .mbps = 2000, .reg = 0x0f },
+ { .mbps = 2050, .reg = 0x40 },
+ { .mbps = 2100, .reg = 0x41 },
+ { .mbps = 2150, .reg = 0x42 },
+ { .mbps = 2200, .reg = 0x43 },
+ { .mbps = 2300, .reg = 0x45 },
+ { .mbps = 2350, .reg = 0x46 },
+ { .mbps = 2400, .reg = 0x47 },
+ { .mbps = 2450, .reg = 0x48 },
+ { .mbps = 2500, .reg = 0x49 },
+ { /* sentinel */ },
+};
+
static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x00 },
{ .mbps = 90, .reg = 0x10 },
@@ -355,6 +452,7 @@ struct rcar_csi2_info {
unsigned int csi0clkfreqrange;
unsigned int num_channels;
bool clear_ulps;
+ bool use_isp;
};
struct rcar_csi2 {
@@ -370,9 +468,8 @@ struct rcar_csi2 {
struct v4l2_subdev *remote;
unsigned int remote_pad;
+ struct mutex lock; /* Protects mf and stream_count. */
struct v4l2_mbus_framefmt mf;
-
- struct mutex lock;
int stream_count;
unsigned short lanes;
@@ -553,6 +650,8 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
/* Code is validated in set_fmt. */
format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
/*
* Enable all supported CSI-2 channels with virtual channel and
@@ -609,9 +708,12 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHTC_REG, 0);
/* Configure */
- rcsi2_write(priv, VCDT_REG, vcdt);
- if (vcdt2)
- rcsi2_write(priv, VCDT2_REG, vcdt2);
+ if (!priv->info->use_isp) {
+ rcsi2_write(priv, VCDT_REG, vcdt);
+ if (vcdt2)
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
+ }
+
/* Lanes are zero indexed. */
rcsi2_write(priv, LSWAP_REG,
LSWAP_L0SEL(priv->lane_swap[0] - 1) |
@@ -636,6 +738,11 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, CSI0CLKFCPR_REG,
CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange));
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG,
+ PHYFRX_FORCERX_MODE_3 | PHYFRX_FORCERX_MODE_2 |
+ PHYFRX_FORCERX_MODE_1 | PHYFRX_FORCERX_MODE_0);
+
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
@@ -647,6 +754,9 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (ret)
return ret;
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG, 0);
+
/* Run post PHY start initialization, if needed. */
if (priv->info->phy_post_init) {
ret = priv->info->phy_post_init(priv);
@@ -725,6 +835,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
struct rcar_csi2 *priv = sd_to_csi2(sd);
struct v4l2_mbus_framefmt *framefmt;
+ mutex_lock(&priv->lock);
+
if (!rcsi2_code_to_fmt(format->format.code))
format->format.code = rcar_csi2_formats[0].code;
@@ -735,6 +847,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
*framefmt = format->format;
}
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -744,11 +858,15 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
+ mutex_lock(&priv->lock);
+
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -917,19 +1035,18 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
priv->notifier.ops = &rcar_csi2_notify_ops;
- asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
- ret = v4l2_async_subdev_notifier_register(&priv->subdev,
- &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->subdev, &priv->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -1063,6 +1180,62 @@ static int rcsi2_phy_post_init_v3m_e3(struct rcar_csi2 *priv)
return rcsi2_phtw_write_array(priv, step1);
}
+static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv,
+ unsigned int mbps)
+{
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { /* sentinel */ },
+ };
+
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step3[] = {
+ { .data = 0x38, .code = 0x08 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step4[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ if (mbps != 0 && mbps <= 1500)
+ ret = rcsi2_phtw_write_array(priv, step1);
+ else
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3u, 0xe2);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_array(priv, step2);
+ if (ret)
+ return ret;
+
+ if (mbps != 0 && mbps <= 1500) {
+ ret = rcsi2_phtw_write_array(priv, step3);
+ if (ret)
+ return ret;
+ }
+
+ ret = rcsi2_phtw_write_array(priv, step4);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
/* -----------------------------------------------------------------------------
* Platform Device Driver.
*/
@@ -1074,11 +1247,9 @@ static const struct media_entity_operations rcar_csi2_entity_ops = {
static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
- struct resource *res;
int irq, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1155,6 +1326,14 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
.num_channels = 2,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
+ .init_phtw = rcsi2_init_phtw_v3u,
+ .hsfreqrange = hsfreqrange_v3u,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+ .use_isp = true,
+};
+
static const struct of_device_id rcar_csi2_of_table[] = {
{
.compatible = "renesas,r8a774a1-csi2",
@@ -1200,6 +1379,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.compatible = "renesas,r8a77990-csi2",
.data = &rcar_csi2_info_r8a77990,
},
+ {
+ .compatible = "renesas,r8a779a0-csi2",
+ .data = &rcar_csi2_info_r8a779a0,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
@@ -1220,7 +1403,7 @@ static int rcsi2_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_csi2 *priv;
- unsigned int i;
+ unsigned int i, num_pads;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -1245,14 +1428,14 @@ static int rcsi2_probe(struct platform_device *pdev)
ret = rcsi2_probe_resources(priv, pdev);
if (ret) {
dev_err(priv->dev, "Failed to get resources\n");
- return ret;
+ goto error_mutex;
}
platform_set_drvdata(pdev, priv);
ret = rcsi2_parse_dt(priv);
if (ret)
- return ret;
+ goto error_mutex;
priv->subdev.owner = THIS_MODULE;
priv->subdev.dev = &pdev->dev;
@@ -1265,28 +1448,32 @@ static int rcsi2_probe(struct platform_device *pdev)
priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
priv->subdev.entity.ops = &rcar_csi2_entity_ops;
+ num_pads = priv->info->use_isp ? 2 : NR_OF_RCAR_CSI2_PAD;
+
priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
- for (i = RCAR_CSI2_SOURCE_VC0; i < NR_OF_RCAR_CSI2_PAD; i++)
+ for (i = RCAR_CSI2_SOURCE_VC0; i < num_pads; i++)
priv->pads[i].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&priv->subdev.entity, NR_OF_RCAR_CSI2_PAD,
+ ret = media_entity_pads_init(&priv->subdev.entity, num_pads,
priv->pads);
if (ret)
- goto error;
+ goto error_async;
pm_runtime_enable(&pdev->dev);
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret < 0)
- goto error;
+ goto error_async;
dev_info(priv->dev, "%d lanes found\n", priv->lanes);
return 0;
-error:
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+error_async:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+error_mutex:
+ mutex_destroy(&priv->lock);
return ret;
}
@@ -1295,12 +1482,14 @@ static int rcsi2_remove(struct platform_device *pdev)
{
struct rcar_csi2 *priv = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
+ mutex_destroy(&priv->lock);
+
return 0;
}
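
The new phtw_mbps_v3u and hsfreqrange_v3u tables are consumed by PHY setup
helpers that sit outside this hunk (rcsi2_phtw_write_mbps() and
rcsi2_set_phypll()). Assuming those helpers keep the existing pattern of
picking the first entry whose rate is at least the computed link rate, the
lookup amounts to this sketch; the helper name is illustrative only:

/* Assumed rule: use the first entry with .mbps >= the computed link rate. */
static int rcsi2_lookup_rate_reg(const struct rcsi2_mbps_reg *table,
				 unsigned int mbps, u16 *reg)
{
	const struct rcsi2_mbps_reg *entry;

	for (entry = table; entry->mbps; entry++)
		if (entry->mbps >= mbps)
			break;

	if (!entry->mbps)
		return -ERANGE;	/* Link rate above the last table entry. */

	*reg = entry->reg;
	return 0;
}

On that assumption, a 1575 Mbps link on V3U would select the 1600 Mbps rows
(0x27 for PHTW, 0x0d for HSFREQRANGE).
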
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index f5f722ab1d4e..25ead9333d00 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -114,6 +114,7 @@
/* Video n Data Mode Register bits */
#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
#define VNDMR_A8BIT_MASK (0xff << 24)
+#define VNDMR_YMODE_Y8 (1 << 12)
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_ABIT (1 << 2)
@@ -603,6 +604,7 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
stride /= 2;
break;
default:
@@ -695,6 +697,7 @@ static int rvin_setup(struct rvin_dev *vin)
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
vnmc |= VNMC_INF_RAW8;
break;
default:
@@ -774,6 +777,14 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_SRGGB8:
dmr = 0;
break;
+ case V4L2_PIX_FMT_GREY:
+ if (input_is_yuv) {
+ dmr = VNDMR_DTMD_YCSEP | VNDMR_YMODE_Y8;
+ output_is_yuv = true;
+ } else {
+ dmr = 0;
+ }
+ break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
vin->format.pixelformat);
@@ -783,16 +794,18 @@ static int rvin_setup(struct rvin_dev *vin)
/* Always update on field change */
vnmc |= VNMC_VUP;
- /* If input and output use the same colorspace, use bypass mode */
- if (input_is_yuv == output_is_yuv)
- vnmc |= VNMC_BPS;
-
- if (vin->info->model == RCAR_GEN3) {
- /* Select between CSI-2 and parallel input */
- if (vin->is_csi)
- vnmc &= ~VNMC_DPINE;
- else
- vnmc |= VNMC_DPINE;
+ if (!vin->info->use_isp) {
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ if (vin->info->model == RCAR_GEN3) {
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
+ vnmc &= ~VNMC_DPINE;
+ else
+ vnmc |= VNMC_DPINE;
+ }
}
/* Progressive or interlaced mode */
@@ -904,7 +917,8 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
vin->format.sizeimage / 2;
break;
}
- } else if (vin->state != RUNNING || list_empty(&vin->buf_list)) {
+ } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
+ list_empty(&vin->buf_list)) {
vin->buf_hw[slot].buffer = NULL;
vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
@@ -1145,6 +1159,10 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8)
return -EPIPE;
break;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_GREY)
+ return -EPIPE;
+ break;
default:
return -EPIPE;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0d141155f0e3..a5bfa76fdac6 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -82,6 +82,10 @@ static const struct rvin_video_format rvin_formats[] = {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.bpp = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .bpp = 1,
+ },
};
const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
@@ -523,6 +527,24 @@ static int rvin_s_selection(struct file *file, void *fh,
return 0;
}
+static int rvin_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_g_parm_cap(&vin->vdev, sd, parm);
+}
+
+static int rvin_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_s_parm_cap(&vin->vdev, sd, parm);
+}
+
static int rvin_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
@@ -739,6 +761,9 @@ static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_g_selection = rvin_g_selection,
.vidioc_s_selection = rvin_s_selection,
+ .vidioc_g_parm = rvin_g_parm,
+ .vidioc_s_parm = rvin_s_parm,
+
.vidioc_g_pixelaspect = rvin_g_pixelaspect,
.vidioc_enum_input = rvin_enum_input,
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index b263ead4db2b..6c06320174a2 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -29,7 +29,7 @@
#define HW_BUFFER_MASK 0x7f
/* Max number on VIN instances that can be in a system */
-#define RCAR_VIN_NUM 8
+#define RCAR_VIN_NUM 32
struct rvin_group;
@@ -48,6 +48,18 @@ enum rvin_csi_id {
RVIN_CSI_MAX,
};
+enum rvin_isp_id {
+ RVIN_ISP0,
+ RVIN_ISP1,
+ RVIN_ISP2,
+ RVIN_ISP4,
+ RVIN_ISP_MAX,
+};
+
+#define RVIN_REMOTES_MAX \
+ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+ RVIN_CSI_MAX : RVIN_ISP_MAX)
+
/**
* enum rvin_dma_state - DMA states
* @STOPPED: No operation in progress
@@ -147,6 +159,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @use_isp: the VIN is connected to the ISP and not to the CSI-2
* @nv12: support outputing NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
@@ -156,6 +169,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool use_isp;
bool nv12;
unsigned int max_width;
@@ -267,8 +281,9 @@ struct rvin_dev {
* @count: number of enabled VIN instances found in DT
* @notifier: group notifier for CSI-2 async subdevices
* @vin: VIN instances which are part of the group
- * @csi: array of pairs of fwnode and subdev pointers
- * to all CSI-2 subdevices.
+ * @link_setup: Callback to create all links for the media graph
+ * @remotes: array of pairs of fwnode and subdev pointers
+ * to all remote subdevices.
*/
struct rvin_group {
struct kref refcount;
@@ -280,10 +295,12 @@ struct rvin_group {
struct v4l2_async_notifier notifier;
struct rvin_dev *vin[RCAR_VIN_NUM];
+ int (*link_setup)(struct rvin_dev *vin);
+
struct {
struct v4l2_async_subdev *asd;
struct v4l2_subdev *subdev;
- } csi[RVIN_CSI_MAX];
+ } remotes[RVIN_REMOTES_MAX];
};
int rvin_dma_register(struct rvin_dev *vin, int irq);
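
With RCAR_VIN_NUM raised to 32 and the per-group remotes[] array sized by
RVIN_REMOTES_MAX, rvin_isp_setup_links() in rcar-core.c derives the ISP slot
and source pad for each VIN with simple integer arithmetic. A hedged sketch of
that mapping (the helper name is illustrative; it assumes the ISP's pad 0 is
its CSI-2 sink, as set up by the new rcar-isp.c driver):

static void rvin_isp_route_for_vin(unsigned int vin_id,
				   unsigned int *isp_slot,
				   unsigned int *isp_src_pad)
{
	/* VINs 0-7 use remotes[] slot 0 (RVIN_ISP0), 8-15 slot 1, and so on. */
	*isp_slot = vin_id / 8;
	/* Pad 0 is the ISP sink from CSI-2, so source pads start at 1. */
	*isp_src_pad = vin_id % 8 + 1;
}

For example, VIN 13 is fed from the second ISP's source pad 6.
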
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 1e3b68a8743a..9a0982fa5c6b 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1212,7 +1212,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
struct fwnode_handle *fwnode, *ep;
struct v4l2_async_subdev *asd;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
NULL);
@@ -1229,8 +1229,8 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
@@ -1346,7 +1346,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
- ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
+ ret = v4l2_async_nf_register(&sdr->v4l2_dev, &sdr->notifier);
if (ret < 0) {
dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
goto cleanup;
@@ -1355,7 +1355,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
return ret;
cleanup:
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
error:
v4l2_device_unregister(&sdr->v4l2_dev);
@@ -1365,8 +1365,8 @@ error:
/* V4L2 SDR device remove */
static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
{
- v4l2_async_notifier_unregister(&sdr->notifier);
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_unregister(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
v4l2_device_unregister(&sdr->v4l2_dev);
}
@@ -1395,8 +1395,7 @@ static int rcar_drif_probe(struct platform_device *pdev)
}
/* Register map */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ch->base = devm_ioremap_resource(&pdev->dev, res);
+ ch->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ch->base))
return PTR_ERR(ch->base);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 89aac60066d9..19de3c19bcca 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2256,7 +2256,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
- struct resource *res;
struct clk *clk;
unsigned int i;
@@ -2283,8 +2282,7 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index f57158bf2b11..56bb464629ed 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1590,7 +1590,6 @@ MODULE_DEVICE_TABLE(of, jpu_dt_ids);
static int jpu_probe(struct platform_device *pdev)
{
struct jpu *jpu;
- struct resource *res;
int ret;
unsigned int i;
@@ -1603,8 +1602,7 @@ static int jpu_probe(struct platform_device *pdev)
jpu->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpu->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpu->regs))
return PTR_ERR(jpu->regs);
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index f432032c7084..2e8dbacc414e 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1513,12 +1513,12 @@ static int ceu_parse_platform_data(struct ceu_device *ceudev,
/* Setup the ceu subdevice and the async subdevice. */
async_sd = &pdata->subdevs[i];
- ceu_sd = v4l2_async_notifier_add_i2c_subdev(&ceudev->notifier,
- async_sd->i2c_adapter_id,
- async_sd->i2c_address,
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_i2c(&ceudev->notifier,
+ async_sd->i2c_adapter_id,
+ async_sd->i2c_address,
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
return PTR_ERR(ceu_sd);
}
ceu_sd->mbus_flags = async_sd->flags;
@@ -1576,9 +1576,9 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
}
/* Setup the ceu subdevice and the async subdevice. */
- ceu_sd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &ceudev->notifier, of_fwnode_handle(ep),
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_fwnode_remote(&ceudev->notifier,
+ of_fwnode_handle(ep),
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
ret = PTR_ERR(ceu_sd);
goto error_cleanup;
@@ -1592,7 +1592,7 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
return num_ep;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
of_node_put(ep);
return ret;
}
@@ -1628,7 +1628,6 @@ static int ceu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ceu_data *ceu_data;
struct ceu_device *ceudev;
- struct resource *res;
unsigned int irq;
int num_subdevs;
int ret;
@@ -1644,8 +1643,7 @@ static int ceu_probe(struct platform_device *pdev)
spin_lock_init(&ceudev->lock);
mutex_init(&ceudev->mlock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ceudev->base = devm_ioremap_resource(dev, res);
+ ceudev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ceudev->base)) {
ret = PTR_ERR(ceudev->base);
goto error_free_ceudev;
@@ -1669,7 +1667,7 @@ static int ceu_probe(struct platform_device *pdev)
if (ret)
goto error_pm_disable;
- v4l2_async_notifier_init(&ceudev->notifier);
+ v4l2_async_nf_init(&ceudev->notifier);
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
ceu_data = of_device_get_match_data(dev);
@@ -1691,8 +1689,7 @@ static int ceu_probe(struct platform_device *pdev)
ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
ceudev->notifier.ops = &ceu_notify_ops;
- ret = v4l2_async_notifier_register(&ceudev->v4l2_dev,
- &ceudev->notifier);
+ ret = v4l2_async_nf_register(&ceudev->v4l2_dev, &ceudev->notifier);
if (ret)
goto error_cleanup;
@@ -1701,7 +1698,7 @@ static int ceu_probe(struct platform_device *pdev)
return 0;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
error_v4l2_unregister:
v4l2_device_unregister(&ceudev->v4l2_dev);
error_pm_disable:
@@ -1718,9 +1715,9 @@ static int ceu_remove(struct platform_device *pdev)
pm_runtime_disable(ceudev->dev);
- v4l2_async_notifier_unregister(&ceudev->notifier);
+ v4l2_async_nf_unregister(&ceudev->notifier);
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
v4l2_device_unregister(&ceudev->v4l2_dev);
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 6759091b15e0..4de5e8d2b261 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -800,7 +800,6 @@ static int rga_probe(struct platform_device *pdev)
{
struct rockchip_rga *rga;
struct video_device *vfd;
- struct resource *res;
int ret = 0;
int irq;
@@ -821,9 +820,7 @@ static int rga_probe(struct platform_device *pdev)
pm_runtime_enable(rga->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rga->regs = devm_ioremap_resource(rga->dev, res);
+ rga->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rga->regs)) {
ret = PTR_ERR(rga->regs);
goto err_put_clk;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 41988eb0ec0a..768987d5f2dd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -685,12 +685,17 @@ static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
spin_unlock(&cap->buf.lock);
}
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
unsigned int i;
u32 status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ if (!status)
+ return IRQ_NONE;
+
rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
@@ -718,6 +723,8 @@ void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
cap->is_streaming = false;
wake_up(&cap->done);
}
+
+ return IRQ_HANDLED;
}
/* ----------------------------------------------------------------------------
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index bb73f4e17b66..d8fa3f1a5a85 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -12,6 +12,7 @@
#define _RKISP1_COMMON_H
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/rkisp1-config.h>
#include <media/media-device.h>
@@ -231,6 +232,16 @@ struct rkisp1_capture {
} pix;
};
+struct rkisp1_stats;
+struct rkisp1_stats_ops {
+ void (*get_awb_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_aec_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_hst_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+};
+
/*
* struct rkisp1_stats - ISP Statistics device
*
@@ -243,17 +254,42 @@ struct rkisp1_capture {
struct rkisp1_stats {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_stats_ops *ops;
spinlock_t lock; /* locks the buffers list 'stats' */
struct list_head stat;
struct v4l2_format vdev_fmt;
};
+struct rkisp1_params;
+struct rkisp1_params_ops {
+ void (*lsc_matrix_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig);
+ void (*goc_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg);
+ void (*awb_meas_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg);
+ void (*awb_meas_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en);
+ void (*awb_gain_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg);
+ void (*aec_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg);
+ void (*hst_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg);
+ void (*hst_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en);
+ void (*afm_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg);
+};
+
/*
* struct rkisp1_params - ISP input parameters device
*
* @vnode: video node
* @rkisp1: pointer to the rkisp1 device
+ * @ops: pointer to the variant-specific operations
* @config_lock: locks the buffer list 'params'
* @params: queue of rkisp1_buffer
* @vdev_fmt: v4l2_format of the metadata format
@@ -263,6 +299,7 @@ struct rkisp1_stats {
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_params_ops *ops;
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
@@ -348,7 +385,6 @@ struct rkisp1_debug {
*/
struct rkisp1_device {
void __iomem *base_addr;
- int irq;
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
@@ -456,9 +492,9 @@ void rkisp1_params_configure(struct rkisp1_params *params,
void rkisp1_params_disable(struct rkisp1_params *params);
/* irq handlers */
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx);
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx);
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx);
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
void rkisp1_params_isr(struct rkisp1_device *rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 7474150b94ed..50b166c49a03 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -101,9 +101,16 @@
* +-----------+ +-----------+
*/
+struct rkisp1_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *ctx);
+};
+
struct rkisp1_match_data {
const char * const *clks;
- unsigned int size;
+ unsigned int clk_size;
+ const struct rkisp1_isr_data *isrs;
+ unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
};
@@ -246,7 +253,7 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
unsigned int next_id = 0;
int ret;
- v4l2_async_notifier_init(ntf);
+ v4l2_async_nf_init(ntf);
while (1) {
struct v4l2_fwnode_endpoint vep = {
@@ -265,8 +272,9 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
if (ret)
goto err_parse;
- rk_asd = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
- struct rkisp1_sensor_async);
+ rk_asd = v4l2_async_nf_add_fwnode_remote(ntf, ep,
+ struct
+ rkisp1_sensor_async);
if (IS_ERR(rk_asd)) {
ret = PTR_ERR(rk_asd);
goto err_parse;
@@ -286,16 +294,16 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
continue;
err_parse:
fwnode_handle_put(ep);
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
if (next_id == 0)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
ntf->ops = &rkisp1_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+ ret = v4l2_async_nf_register(&rkisp1->v4l2_dev, ntf);
if (ret) {
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
return 0;
@@ -385,36 +393,64 @@ err_unreg_isp_subdev:
static irqreturn_t rkisp1_isr(int irq, void *ctx)
{
- struct device *dev = ctx;
- struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
-
/*
* Call rkisp1_capture_isr() first to handle the frame that
* potentially completed using the current frame_sequence number before
* it is potentially incremented by rkisp1_isp_isr() in the vertical
* sync.
*/
- rkisp1_capture_isr(rkisp1);
- rkisp1_isp_isr(rkisp1);
- rkisp1_mipi_isr(rkisp1);
+ rkisp1_capture_isr(irq, ctx);
+ rkisp1_isp_isr(irq, ctx);
+ rkisp1_mipi_isr(irq, ctx);
return IRQ_HANDLED;
}
+static const char * const px30_isp_clks[] = {
+ "isp",
+ "aclk",
+ "hclk",
+ "pclk",
+};
+
+static const struct rkisp1_isr_data px30_isp_isrs[] = {
+ { "isp", rkisp1_isp_isr },
+ { "mi", rkisp1_capture_isr },
+ { "mipi", rkisp1_mipi_isr },
+};
+
+static const struct rkisp1_match_data px30_isp_match_data = {
+ .clks = px30_isp_clks,
+ .clk_size = ARRAY_SIZE(px30_isp_clks),
+ .isrs = px30_isp_isrs,
+ .isr_size = ARRAY_SIZE(px30_isp_isrs),
+ .isp_ver = RKISP1_V12,
+};
+
static const char * const rk3399_isp_clks[] = {
"isp",
"aclk",
"hclk",
};
+static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
+ { NULL, rkisp1_isr },
+};
+
static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
- .size = ARRAY_SIZE(rk3399_isp_clks),
+ .clk_size = ARRAY_SIZE(rk3399_isp_clks),
+ .isrs = rk3399_isp_isrs,
+ .isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
+ .compatible = "rockchip,px30-cif-isp",
+ .data = &px30_isp_match_data,
+ },
+ {
.compatible = "rockchip,rk3399-cif-isp",
.data = &rk3399_isp_match_data,
},
@@ -478,25 +514,27 @@ static int rkisp1_probe(struct platform_device *pdev)
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(dev, irq, rkisp1_isr, IRQF_SHARED,
- dev_driver_string(dev), dev);
- if (ret) {
- dev_err(dev, "request irq failed: %d\n", ret);
- return ret;
+ for (i = 0; i < match_data->isr_size; i++) {
+ irq = (match_data->isrs[i].name) ?
+ platform_get_irq_byname(pdev, match_data->isrs[i].name) :
+ platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, match_data->isrs[i].isr, IRQF_SHARED,
+ dev_driver_string(dev), dev);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
}
- rkisp1->irq = irq;
-
- for (i = 0; i < match_data->size; i++)
+ for (i = 0; i < match_data->clk_size; i++)
rkisp1->clks[i].id = match_data->clks[i];
- ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
+ ret = devm_clk_bulk_get(dev, match_data->clk_size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = match_data->size;
+ rkisp1->clk_size = match_data->clk_size;
pm_runtime_enable(&pdev->dev);
@@ -542,8 +580,8 @@ static int rkisp1_remove(struct platform_device *pdev)
{
struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&rkisp1->notifier);
- v4l2_async_notifier_cleanup(&rkisp1->notifier);
+ v4l2_async_nf_unregister(&rkisp1->notifier);
+ v4l2_async_nf_cleanup(&rkisp1->notifier);
rkisp1_params_unregister(rkisp1);
rkisp1_stats_unregister(rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index d596bc040005..2a35bf24e54e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -414,6 +414,10 @@ static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
+ /* V12 could also use a newer csi2-host, but we don't want that yet */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12)
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_CSI0_CTRL0);
+
/* Configure Data Type and Virtual Channel */
rkisp1_write(rkisp1,
RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
@@ -533,6 +537,15 @@ static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
RKISP1_CIF_ICCL_DCROP_CLK;
rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+
+ /* ensure sp and mp can run at the same time in V12 */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12) {
+ val = RKISP1_CIF_CLK_CTRL_MI_Y12 | RKISP1_CIF_CLK_CTRL_MI_SP |
+ RKISP1_CIF_CLK_CTRL_MI_RAW0 | RKISP1_CIF_CLK_CTRL_MI_RAW1 |
+ RKISP1_CIF_CLK_CTRL_MI_READ | RKISP1_CIF_CLK_CTRL_MI_RAWRD |
+ RKISP1_CIF_CLK_CTRL_CP | RKISP1_CIF_CLK_CTRL_IE;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_VI_ISP_CLK_CTRL_V12);
+ }
}
static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
@@ -1106,13 +1119,15 @@ void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
* Interrupt handlers
*/
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
@@ -1147,6 +1162,8 @@ void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
} else {
rkisp1->debug.mipi_error++;
}
+
+ return IRQ_HANDLED;
}
static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
@@ -1159,13 +1176,15 @@ static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
v4l2_event_queue(isp->sd.devnode, &event);
}
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
@@ -1207,4 +1226,6 @@ void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
*/
rkisp1_params_isr(rkisp1);
}
+
+ return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 8fa5b0abf1f9..8f62f09e635f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -185,8 +185,8 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
/* ISP LS correction interface function */
static void
-rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_lsc_config *pconfig)
+rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
{
unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
@@ -212,39 +212,111 @@ rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
* DWORDs (2nd value of last DWORD unused)
*/
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j],
- pconfig->r_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j],
- pconfig->gr_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j],
- pconfig->gb_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j],
- pconfig->b_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+ isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_1;
+ rkisp1_write(params->rkisp1, isp_lsc_table_sel,
+ RKISP1_CIF_ISP_LSC_TABLE_SEL);
+}
+
+static void
+rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
+{
+ unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+
+ isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+
+ /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
+ sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+
+ /* program data tables (table size is 9 * 17 = 153) */
+ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
+ /*
+ * 17 sectors with 2 values in one DWORD = 9
+ * DWORDs (2nd value of last DWORD unused)
+ */
+ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
@@ -265,7 +337,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
- rkisp1_lsc_correct_matrix_config(params, arg);
+ params->ops->lsc_matrix_config(params, arg);
for (i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE / 2; i++) {
/* program x size tables */
@@ -382,18 +454,37 @@ static void rkisp1_sdg_config(struct rkisp1_params *params,
}
/* ISP GAMMA correction interface function */
-static void rkisp1_goc_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_goc_config *arg)
+static void rkisp1_goc_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
{
unsigned int i;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
- rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4);
+}
+
+static void rkisp1_goc_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
+{
+ unsigned int i;
+ u32 value;
+
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12);
+
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 / 2; i++) {
+ value = RKISP1_CIF_ISP_GAMMA_VALUE_V12(
+ arg->gamma_y[2 * i + 1],
+ arg->gamma_y[2 * i]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4);
+ }
}
/* ISP Cross Talk */
@@ -433,8 +524,8 @@ static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
}
/* ISP White Balance Mode */
-static void rkisp1_awb_meas_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg)
+static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
{
u32 reg_val = 0;
/* based on the mode,configure the awb module */
@@ -442,43 +533,111 @@ static void rkisp1_awb_meas_config(struct rkisp1_params *params,
/* Reference Cb and Cr */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
- arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF);
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V10);
/* Yc Threshold */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
- arg->min_c, RKISP1_CIF_ISP_AWB_THRESH);
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V10);
}
- reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
if (arg->enable_ymax_cmp)
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
/* window offset */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS);
+ arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS);
+ arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10);
/* AWB window size */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE);
+ arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE);
+ arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10);
/* Number of frames */
rkisp1_write(params->rkisp1,
- arg->frames, RKISP1_CIF_ISP_AWB_FRAMES);
+ arg->frames, RKISP1_CIF_ISP_AWB_FRAMES_V10);
+}
+
+static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
+{
+ u32 reg_val = 0;
+ /* based on the mode, configure the awb module */
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
+ /* Reference Cb and Cr */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V12);
+ /* Yc Threshold */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
+ RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
+ RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V12);
+ }
+
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
+ if (arg->enable_ymax_cmp)
+ reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ else
+ reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ reg_val &= ~RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12;
+ reg_val |= RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(arg->frames);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+
+ /* window offset */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_offs << 16 |
+ arg->awb_wnd.h_offs,
+ RKISP1_CIF_ISP_AWB_OFFS_V12);
+ /* AWB window size */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_size << 16 |
+ arg->awb_wnd.h_size,
+ RKISP1_CIF_ISP_AWB_SIZE_V12);
+}
+
+static void
+rkisp1_awb_meas_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
+{
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* switch off */
+ reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
+
+ if (en) {
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
+ else
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
+
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* Measurements require the AWB block to be active. */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ } else {
+ rkisp1_write(params->rkisp1,
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ }
}
static void
-rkisp1_awb_meas_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg,
- bool en)
+rkisp1_awb_meas_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
{
- u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
/* switch off */
reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
@@ -489,34 +648,47 @@ rkisp1_awb_meas_enable(struct rkisp1_params *params,
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
/* Measurements require AWB block be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
rkisp1_write(params->rkisp1,
- reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
}
static void
-rkisp1_awb_gain_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_gain_config *arg)
+rkisp1_awb_gain_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
+{
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V10);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V10);
+}
+
+static void
+rkisp1_awb_gain_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
{
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
- arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G);
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V12);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
- arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB);
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V12);
}
-static void rkisp1_aec_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_aec_config *arg)
+static void rkisp1_aec_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
{
unsigned int block_hsize, block_vsize;
u32 exp_ctrl;
@@ -531,21 +703,53 @@ static void rkisp1_aec_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
rkisp1_write(params->rkisp1,
- arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET);
+ arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET_V10);
rkisp1_write(params->rkisp1,
- arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET);
+ arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_EXP_COLUMN_NUM - 1;
+ RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size /
- RKISP1_CIF_ISP_EXP_ROW_NUM - 1;
+ RKISP1_CIF_ISP_EXP_ROW_NUM_V10 - 1;
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_H_SIZE_SET(block_hsize),
- RKISP1_CIF_ISP_EXP_H_SIZE);
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize),
+ RKISP1_CIF_ISP_EXP_H_SIZE_V10);
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_V_SIZE_SET(block_vsize),
- RKISP1_CIF_ISP_EXP_V_SIZE);
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize),
+ RKISP1_CIF_ISP_EXP_V_SIZE_V10);
+}
+
+static void rkisp1_aec_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
+{
+ u32 exp_ctrl;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx = 1;
+ const u32 ae_wnd_num[] = { 5, 9, 15, 15 };
+
+ /* avoid overriding the old enable value */
+ exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
+ exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
+ if (arg->autostop)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
+ if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(wnd_num_idx);
+ rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(arg->meas_window.v_offs) |
+ RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs),
+ RKISP1_CIF_ISP_EXP_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / ae_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / ae_wnd_num[wnd_num_idx] - 1;
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(block_vsize) |
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize),
+ RKISP1_CIF_ISP_EXP_SIZE_V12);
}
static void rkisp1_cproc_config(struct rkisp1_params *params,
@@ -578,73 +782,151 @@ static void rkisp1_cproc_config(struct rkisp1_params *params,
}
}
-static void rkisp1_hst_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg)
+static void rkisp1_hst_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
{
unsigned int block_hsize, block_vsize;
static const u32 hist_weight_regs[] = {
- RKISP1_CIF_ISP_HIST_WEIGHT_00TO30,
- RKISP1_CIF_ISP_HIST_WEIGHT_40TO21,
- RKISP1_CIF_ISP_HIST_WEIGHT_31TO12,
- RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
- RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
- RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
+ RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10,
};
const u8 *weight;
unsigned int i;
u32 hist_prop;
/* avoid to override the old enable value */
- hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP);
- hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
- hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
- rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP);
+ hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10);
+ hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
+ hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.h_offs,
- RKISP1_CIF_ISP_HIST_H_OFFS);
+ RKISP1_CIF_ISP_HIST_H_OFFS_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.v_offs,
- RKISP1_CIF_ISP_HIST_V_OFFS);
+ RKISP1_CIF_ISP_HIST_V_OFFS_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_HIST_COLUMN_NUM - 1;
- block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM - 1;
+ RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 - 1;
+ block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM_V10 - 1;
- rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE);
- rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE);
+ rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE_V10);
+ rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE_V10);
weight = arg->hist_weight;
for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_HIST_WEIGHT_SET(weight[0],
+ RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0],
weight[1],
weight[2],
weight[3]),
hist_weight_regs[i]);
- rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10);
+}
+
+static void rkisp1_hst_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
+{
+ unsigned int i, j;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx, hist_weight_num, hist_ctrl, value;
+ u8 weight15x15[RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12];
+ const u32 hist_wnd_num[] = { 5, 9, 15, 15 };
+
+ /* for now we only support a 9x9 window */
+ wnd_num_idx = 1;
+ memset(weight15x15, 0x00, sizeof(weight15x15));
+ /* avoid overriding the old enable value */
+ hist_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12);
+ hist_ctrl &= RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12;
+ hist_ctrl = hist_ctrl |
+ RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_ctrl, RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_OFFS_SET_V12(arg->meas_window.h_offs,
+ arg->meas_window.v_offs),
+ RKISP1_CIF_ISP_HIST_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / hist_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / hist_wnd_num[wnd_num_idx] - 1;
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize),
+ RKISP1_CIF_ISP_HIST_SIZE_V12);
+
+ for (i = 0; i < hist_wnd_num[wnd_num_idx]; i++) {
+ for (j = 0; j < hist_wnd_num[wnd_num_idx]; j++) {
+ weight15x15[i * RKISP1_CIF_ISP_HIST_ROW_NUM_V12 + j] =
+ arg->hist_weight[i * hist_wnd_num[wnd_num_idx] + j];
+ }
+ }
+
+ hist_weight_num = RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12;
+ for (i = 0; i < (hist_weight_num / 4); i++) {
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(
+ weight15x15[4 * i + 0],
+ weight15x15[4 * i + 1],
+ weight15x15[4 * i + 2],
+ weight15x15[4 * i + 3]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
+ }
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(weight15x15[4 * i + 0], 0, 0, 0);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
}
static void
-rkisp1_hst_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg, bool en)
+rkisp1_hst_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
{
if (en) {
u32 hist_prop = rkisp1_read(params->rkisp1,
- RKISP1_CIF_ISP_HIST_PROP);
+ RKISP1_CIF_ISP_HIST_PROP_V10);
- hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= arg->mode;
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
hist_prop);
} else {
- rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP,
- RKISP1_CIF_ISP_HIST_PROP_MODE_MASK);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
+ RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10);
+ }
+}
+
+static void
+rkisp1_hst_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
+{
+ if (en) {
+ u32 hist_ctrl = rkisp1_read(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ hist_ctrl &= ~RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12;
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(arg->mode);
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(1);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ hist_ctrl);
+ } else {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12);
}
}
-static void rkisp1_afm_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_afc_config *arg)
+static void rkisp1_afm_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
{
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
@@ -674,6 +956,45 @@ static void rkisp1_afm_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
}
+static void rkisp1_afm_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
+{
+ size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
+ arg->num_afm_win);
+ u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
+ u32 lum_var_shift, afm_var_shift;
+ unsigned int i;
+
+ /* Switch off to configure. */
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+
+ for (i = 0; i < num_of_win; i++) {
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_LT_A + i * 8);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
+ arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
+ arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ }
+ rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+
+ lum_var_shift = RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(arg->var_shift);
+ afm_var_shift = RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(arg->var_shift);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift),
+ RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+
+ /* restore afm status */
+ rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+}
+
static void rkisp1_ie_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ie_config *arg)
{
@@ -955,7 +1276,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update awb gains */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
- rkisp1_awb_gain_config(params, &new_params->others.awb_gain_config);
+ params->ops->awb_gain_config(params, &new_params->others.awb_gain_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
@@ -1010,8 +1331,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update goc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)
- rkisp1_goc_config(params,
- &new_params->others.goc_config);
+ params->ops->goc_config(params, &new_params->others.goc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_GOC)
@@ -1081,17 +1401,17 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update awb config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_config(params, &new_params->meas.awb_meas_config);
+ params->ops->awb_meas_config(params, &new_params->meas.awb_meas_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_enable(params,
- &new_params->meas.awb_meas_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
+ params->ops->awb_meas_enable(params,
+ &new_params->meas.awb_meas_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
/* update afc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)
- rkisp1_afm_config(params,
- &new_params->meas.afc_config);
+ params->ops->afm_config(params,
+ &new_params->meas.afc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AFC)
@@ -1106,18 +1426,18 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update hst config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_config(params,
- &new_params->meas.hst_config);
+ params->ops->hst_config(params,
+ &new_params->meas.hst_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_enable(params,
- &new_params->meas.hst_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
+ params->ops->hst_enable(params,
+ &new_params->meas.hst_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
/* update aec config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)
- rkisp1_aec_config(params,
- &new_params->meas.aec_config);
+ params->ops->aec_config(params,
+ &new_params->meas.aec_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AEC)
@@ -1218,21 +1538,21 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
- rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
- true);
+ params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config);
+ params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config,
+ true);
- rkisp1_aec_config(params, &rkisp1_aec_params_default_config);
+ params->ops->aec_config(params, &rkisp1_aec_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
- rkisp1_afm_config(params, &rkisp1_afc_params_default_config);
+ params->ops->afm_config(params, &rkisp1_afc_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
- rkisp1_hst_config(params, &hst);
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ params->ops->hst_config(params, &hst);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
rkisp1_hst_params_default_config.mode);
/* set the range */
@@ -1278,7 +1598,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
- rkisp1_awb_meas_enable(params, NULL, false);
+ params->ops->awb_meas_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
@@ -1286,7 +1606,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
rkisp1_ctk_enable(params, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
- rkisp1_hst_enable(params, NULL, false);
+ params->ops->hst_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
rkisp1_ie_enable(params, false);
@@ -1294,6 +1614,30 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DPF_MODE_EN);
}
+static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v10,
+ .goc_config = rkisp1_goc_config_v10,
+ .awb_meas_config = rkisp1_awb_meas_config_v10,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v10,
+ .awb_gain_config = rkisp1_awb_gain_config_v10,
+ .aec_config = rkisp1_aec_config_v10,
+ .hst_config = rkisp1_hst_config_v10,
+ .hst_enable = rkisp1_hst_enable_v10,
+ .afm_config = rkisp1_afm_config_v10,
+};
+
+static struct rkisp1_params_ops rkisp1_v12_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
+ .goc_config = rkisp1_goc_config_v12,
+ .awb_meas_config = rkisp1_awb_meas_config_v12,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v12,
+ .awb_gain_config = rkisp1_awb_gain_config_v12,
+ .aec_config = rkisp1_aec_config_v12,
+ .hst_config = rkisp1_hst_config_v12,
+ .hst_enable = rkisp1_hst_enable_v12,
+ .afm_config = rkisp1_afm_config_v12,
+};
+
static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -1459,6 +1803,11 @@ static void rkisp1_init_params(struct rkisp1_params *params)
V4L2_META_FMT_RK_ISP1_PARAMS;
params->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_params_cfg);
+
+ if (params->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ params->ops = &rkisp1_v12_params_ops;
+ else
+ params->ops = &rkisp1_v10_params_ops;
}
int rkisp1_params_register(struct rkisp1_device *rkisp1)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index fa33080f51db..d326214c7e07 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -212,6 +212,35 @@
/* CCL */
#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
+/* VI_ISP_CLK_CTRL */
+#define RKISP1_CIF_CLK_CTRL_ISP_RAW BIT(0)
+#define RKISP1_CIF_CLK_CTRL_ISP_RGB BIT(1)
+#define RKISP1_CIF_CLK_CTRL_ISP_YUV BIT(2)
+#define RKISP1_CIF_CLK_CTRL_ISP_3A BIT(3)
+#define RKISP1_CIF_CLK_CTRL_MIPI_RAW BIT(4)
+#define RKISP1_CIF_CLK_CTRL_ISP_IE BIT(5)
+#define RKISP1_CIF_CLK_CTRL_RSZ_RAM BIT(6)
+#define RKISP1_CIF_CLK_CTRL_JPEG_RAM BIT(7)
+#define RKISP1_CIF_CLK_CTRL_ACLK_ISP BIT(8)
+#define RKISP1_CIF_CLK_CTRL_MI_IDC BIT(9)
+#define RKISP1_CIF_CLK_CTRL_MI_MP BIT(10)
+#define RKISP1_CIF_CLK_CTRL_MI_JPEG BIT(11)
+#define RKISP1_CIF_CLK_CTRL_MI_DP BIT(12)
+#define RKISP1_CIF_CLK_CTRL_MI_Y12 BIT(13)
+#define RKISP1_CIF_CLK_CTRL_MI_SP BIT(14)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW0 BIT(15)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW1 BIT(16)
+#define RKISP1_CIF_CLK_CTRL_MI_READ BIT(17)
+#define RKISP1_CIF_CLK_CTRL_MI_RAWRD BIT(18)
+#define RKISP1_CIF_CLK_CTRL_CP BIT(19)
+#define RKISP1_CIF_CLK_CTRL_IE BIT(20)
+#define RKISP1_CIF_CLK_CTRL_SI BIT(21)
+#define RKISP1_CIF_CLK_CTRL_RSZM BIT(22)
+#define RKISP1_CIF_CLK_CTRL_DPMUX BIT(23)
+#define RKISP1_CIF_CLK_CTRL_JPEG BIT(24)
+#define RKISP1_CIF_CLK_CTRL_RSZS BIT(25)
+#define RKISP1_CIF_CLK_CTRL_MIPI BIT(26)
+#define RKISP1_CIF_CLK_CTRL_MARVINMI BIT(27)
/* ICCL */
#define RKISP1_CIF_ICCL_ISP_CLK BIT(0)
#define RKISP1_CIF_ICCL_CP_CLK BIT(1)
@@ -346,26 +375,58 @@
#define RKISP1_CIF_SUPER_IMP_CTRL_TRANSP_DIS BIT(2)
/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS (0 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB BIT(0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED (2 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN (3 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE (4 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM (5 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK 0x7
-#define RKISP1_CIF_ISP_HIST_PREDIV_SET(x) (((x) & 0x7F) << 3)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_SET(v0, v1, v2, v3) \
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS_V10 (0 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB_V10 BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED_V10 (2 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN_V10 (3 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10 (4 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM_V10 (5 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10 0x7
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x) (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(v0, v1, v2, v3) \
(((v0) & 0x1F) | (((v1) & 0x1F) << 8) |\
(((v2) & 0x1F) << 16) | \
(((v3) & 0x1F) << 24))
-#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED 0xFFFFF000
-#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED 0xFFFFF800
-#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED 0xE0E0E0E0
-#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
-#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
-#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xFFFFF000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10 0xFFFFF800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10 0xE0E0E0E0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10 0x0000007F
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x) ((x) & 0x000FFFFF)
+
+/* ISP HISTOGRAM CALCULATION : CIF_ISP_HIST */
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(x) (((x) & 0x01) << 0)
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(0x01)
+#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x) (((x) & 0x7F) << 1)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(x) (((x) & 0x07) << 8)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(0x07)
+#define RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(x) (((x) & 0x01) << 11)
+#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x) (((x) & 0xFFF) << 12)
+#define RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(x) (((x) & 0x07) << 24)
+#define RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(x) (((x) & 0x01) << 27)
+#define RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 28)
+#define RKISP1_CIF_ISP_HIST_CTRL_DBGEN_SET_V12(x) (((x) & 0x01) << 30)
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12 \
+ (RKISP1_CIF_ISP_HIST_ROW_NUM_V12 * RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(v0, v1, v2, v3) \
+ (((v0) & 0x3F) | (((v1) & 0x3F) << 8) |\
+ (((v2) & 0x3F) << 16) |\
+ (((v3) & 0x3F) << 24))
+
+#define RKISP1_CIF_ISP_HIST_OFFS_SET_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 16))
+#define RKISP1_CIF_ISP_HIST_SIZE_SET_V12(v0, v1) \
+ (((v0) & 0x7FF) | (((v1) & 0x7FF) << 16))
+
+#define RKISP1_CIF_ISP_HIST_GET_BIN0_V12(x) \
+ ((x) & 0xFFFF)
+#define RKISP1_CIF_ISP_HIST_GET_BIN1_V12(x) \
+ (((x) >> 16) & 0xFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
@@ -401,6 +462,8 @@
#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN ((0 << 31) | (0x2 << 0))
#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xFFFFFFFC
#define RKISP1_CIF_ISP_AWB_MODE_READ(x) ((x) & 3)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(x) (((x) & 0x07) << 28)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12 RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(0x07)
/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G */
#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3FF) << 16)
#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3FF)
@@ -435,6 +498,7 @@
/* ISP_EXP_CTRL */
#define RKISP1_CIF_ISP_EXP_ENA BIT(0)
#define RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP BIT(1)
+#define RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 2)
/*
*'1' luminance calculation according to Y=(R+G+B) x 0.332 (85/256)
*'0' luminance calculation according to Y=16+0.25R+0.5G+0.1094B
@@ -442,42 +506,76 @@
#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1 BIT(31)
/* ISP_EXP_H_SIZE */
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET(x) ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12 0x000007FF
/* ISP_EXP_V_SIZE : vertical size must be a multiple of 2). */
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x) (((x) & 0x7FE) << 16)
/* ISP_EXP_H_OFFSET */
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_HOFFS 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V10 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12 0x1FFF
/* ISP_EXP_V_OFFSET */
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_VOFFS 1806
-
-#define RKISP1_CIF_ISP_EXP_ROW_NUM 5
-#define RKISP1_CIF_ISP_EXP_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS \
- (RKISP1_CIF_ISP_EXP_ROW_NUM * RKISP1_CIF_ISP_EXP_COLUMN_NUM)
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE 516
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE 35
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE 390
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE 28
-#define RKISP1_CIF_ISP_EXP_MAX_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MAX_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V10 1806
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x) (((x) & 0x1FFF) << 16)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12 0x1FFF
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V10 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10)
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 516
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 35
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 390
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 28
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V12 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 0x7FF
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 0x7FE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x) (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x) (((x) >> 24) & 0xFF)
/* LSC: ISP_LSC_CTRL */
#define RKISP1_CIF_ISP_LSC_CTRL_ENA BIT(0)
#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1) \
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \
@@ -550,6 +648,10 @@
(1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xFFFFF000
+/* GAMMA-OUT */
+#define RKISP1_CIF_ISP_GAMMA_VALUE_V12(x, y) \
+ (((x) & 0xFFF) << 16 | ((y) & 0xFFF) << 0)
+
/* AFM */
#define RKISP1_CIF_ISP_AFM_ENA BIT(0)
#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xFFFF0000
@@ -560,6 +662,11 @@
#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN 0x2
#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1FFF) << 16)
#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(x, y) (((x) & 0x7) << 16 | ((y) & 0x7) << 0)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(x, y) (((x) & 0x7) << 20 | ((y) & 0x7) << 4)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(x, y) (((x) & 0x7) << 24 | ((y) & 0x7) << 8)
+#define RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(x) (((x) & 0x70000) >> 16)
+#define RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(x) ((x) & 0x7)
/* DPF */
#define RKISP1_CIF_ISP_DPF_MODE_EN BIT(0)
@@ -582,6 +689,7 @@
#define RKISP1_CIF_CTRL_BASE 0x00000000
#define RKISP1_CIF_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
+#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000C)
#define RKISP1_CIF_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
#define RKISP1_CIF_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
@@ -667,18 +775,35 @@
#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000E4)
#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000E8)
#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000EC)
-#define RKISP1_CIF_ISP_AWB_PROP (RKISP1_CIF_ISP_BASE + 0x00000110)
-#define RKISP1_CIF_ISP_AWB_WND_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000114)
-#define RKISP1_CIF_ISP_AWB_WND_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_WND_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000011C)
-#define RKISP1_CIF_ISP_AWB_WND_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000120)
-#define RKISP1_CIF_ISP_AWB_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000124)
-#define RKISP1_CIF_ISP_AWB_REF (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_THRESH (RKISP1_CIF_ISP_BASE + 0x0000012C)
-#define RKISP1_CIF_ISP_AWB_GAIN_G (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB (RKISP1_CIF_ISP_BASE + 0x0000013C)
-#define RKISP1_CIF_ISP_AWB_WHITE_CNT (RKISP1_CIF_ISP_BASE + 0x00000140)
-#define RKISP1_CIF_ISP_AWB_MEAN (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V10 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_AWB_FRAMES_V10 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_AWB_REF_V10 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_THRESH_V10 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V10 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V10 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_AWB_MEAN_V10 (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V12 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_SIZE_V12 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_OFFS_V12 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_REF_V12 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_THRESH_V12 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_X_COOR12_V12 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_X_COOR34_V12 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_MEAN_V12 (RKISP1_CIF_ISP_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_DEGAIN_V12 (RKISP1_CIF_ISP_BASE + 0x00000134)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V12 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_REGION_LINE_V12 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_WP_CNT_REGION0_V12 (RKISP1_CIF_ISP_BASE + 0x00000160)
+#define RKISP1_CIF_ISP_WP_CNT_REGION1_V12 (RKISP1_CIF_ISP_BASE + 0x00000164)
+#define RKISP1_CIF_ISP_WP_CNT_REGION2_V12 (RKISP1_CIF_ISP_BASE + 0x00000168)
+#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12 (RKISP1_CIF_ISP_BASE + 0x0000016C)
#define RKISP1_CIF_ISP_CC_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x00000170)
#define RKISP1_CIF_ISP_CC_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x00000174)
#define RKISP1_CIF_ISP_CC_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x00000178)
@@ -712,30 +837,32 @@
#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001E8)
#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001EC)
#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001F0)
-#define RKISP1_CIF_ISP_GAMMA_OUT_MODE (RKISP1_CIF_ISP_BASE + 0x000001F4)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0 (RKISP1_CIF_ISP_BASE + 0x000001F8)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1 (RKISP1_CIF_ISP_BASE + 0x000001FC)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2 (RKISP1_CIF_ISP_BASE + 0x00000200)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3 (RKISP1_CIF_ISP_BASE + 0x00000204)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4 (RKISP1_CIF_ISP_BASE + 0x00000208)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5 (RKISP1_CIF_ISP_BASE + 0x0000020C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6 (RKISP1_CIF_ISP_BASE + 0x00000210)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7 (RKISP1_CIF_ISP_BASE + 0x00000214)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8 (RKISP1_CIF_ISP_BASE + 0x00000218)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9 (RKISP1_CIF_ISP_BASE + 0x0000021C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10 (RKISP1_CIF_ISP_BASE + 0x00000220)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11 (RKISP1_CIF_ISP_BASE + 0x00000224)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12 (RKISP1_CIF_ISP_BASE + 0x00000228)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13 (RKISP1_CIF_ISP_BASE + 0x0000022C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14 (RKISP1_CIF_ISP_BASE + 0x00000230)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15 (RKISP1_CIF_ISP_BASE + 0x00000234)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16 (RKISP1_CIF_ISP_BASE + 0x00000238)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10 (RKISP1_CIF_ISP_BASE + 0x000001F4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 (RKISP1_CIF_ISP_BASE + 0x000001F8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10 (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2_V10 (RKISP1_CIF_ISP_BASE + 0x00000200)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3_V10 (RKISP1_CIF_ISP_BASE + 0x00000204)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4_V10 (RKISP1_CIF_ISP_BASE + 0x00000208)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10 (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6_V10 (RKISP1_CIF_ISP_BASE + 0x00000210)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7_V10 (RKISP1_CIF_ISP_BASE + 0x00000214)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8_V10 (RKISP1_CIF_ISP_BASE + 0x00000218)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10 (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10_V10 (RKISP1_CIF_ISP_BASE + 0x00000220)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11_V10 (RKISP1_CIF_ISP_BASE + 0x00000224)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12_V10 (RKISP1_CIF_ISP_BASE + 0x00000228)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10 (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14_V10 (RKISP1_CIF_ISP_BASE + 0x00000230)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15_V10 (RKISP1_CIF_ISP_BASE + 0x00000234)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16_V10 (RKISP1_CIF_ISP_BASE + 0x00000238)
#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023C)
#define RKISP1_CIF_ISP_ERR_CLR (RKISP1_CIF_ISP_BASE + 0x00000240)
#define RKISP1_CIF_ISP_FRAME_COUNT (RKISP1_CIF_ISP_BASE + 0x00000244)
#define RKISP1_CIF_ISP_CT_OFFSET_R (RKISP1_CIF_ISP_BASE + 0x00000248)
#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024C)
#define RKISP1_CIF_ISP_CT_OFFSET_B (RKISP1_CIF_ISP_BASE + 0x00000250)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12 (RKISP1_CIF_ISP_BASE + 0x00000300)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 (RKISP1_CIF_ISP_BASE + 0x00000304)
#define RKISP1_CIF_ISP_FLASH_BASE 0x00000660
#define RKISP1_CIF_ISP_FLASH_CMD (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
@@ -1005,36 +1132,35 @@
#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
#define RKISP1_CIF_ISP_IS_V_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BASE 0x00002400
-
-#define RKISP1_CIF_ISP_HIST_PROP (RKISP1_CIF_ISP_HIST_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_HIST_H_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_HIST_V_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_H_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_HIST_V_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_HIST_BIN_0 (RKISP1_CIF_ISP_HIST_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_HIST_BIN_1 (RKISP1_CIF_ISP_HIST_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_HIST_BIN_2 (RKISP1_CIF_ISP_HIST_BASE + 0x0000001C)
-#define RKISP1_CIF_ISP_HIST_BIN_3 (RKISP1_CIF_ISP_HIST_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_HIST_BIN_4 (RKISP1_CIF_ISP_HIST_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_HIST_BIN_5 (RKISP1_CIF_ISP_HIST_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_HIST_BIN_6 (RKISP1_CIF_ISP_HIST_BASE + 0x0000002C)
-#define RKISP1_CIF_ISP_HIST_BIN_7 (RKISP1_CIF_ISP_HIST_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BIN_8 (RKISP1_CIF_ISP_HIST_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_HIST_BIN_9 (RKISP1_CIF_ISP_HIST_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_HIST_BIN_10 (RKISP1_CIF_ISP_HIST_BASE + 0x0000003C)
-#define RKISP1_CIF_ISP_HIST_BIN_11 (RKISP1_CIF_ISP_HIST_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_HIST_BIN_12 (RKISP1_CIF_ISP_HIST_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_HIST_BIN_13 (RKISP1_CIF_ISP_HIST_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_HIST_BIN_14 (RKISP1_CIF_ISP_HIST_BASE + 0x0000004C)
-#define RKISP1_CIF_ISP_HIST_BIN_15 (RKISP1_CIF_ISP_HIST_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30 (RKISP1_CIF_ISP_HIST_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21 (RKISP1_CIF_ISP_HIST_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12 (RKISP1_CIF_ISP_HIST_BASE + 0x0000005C)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03 (RKISP1_CIF_ISP_HIST_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43 (RKISP1_CIF_ISP_HIST_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34 (RKISP1_CIF_ISP_HIST_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_44 (RKISP1_CIF_ISP_HIST_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_HIST_BASE_V10 0x00002400
+#define RKISP1_CIF_ISP_HIST_PROP_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_H_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_V_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_H_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_V_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000010)
+#define RKISP1_CIF_ISP_HIST_BIN_0_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000014)
+#define RKISP1_CIF_ISP_HIST_BIN_1_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000018)
+#define RKISP1_CIF_ISP_HIST_BIN_2_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_3_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000020)
+#define RKISP1_CIF_ISP_HIST_BIN_4_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000024)
+#define RKISP1_CIF_ISP_HIST_BIN_5_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000028)
+#define RKISP1_CIF_ISP_HIST_BIN_6_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_7_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000030)
+#define RKISP1_CIF_ISP_HIST_BIN_8_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000034)
+#define RKISP1_CIF_ISP_HIST_BIN_9_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000038)
+#define RKISP1_CIF_ISP_HIST_BIN_10_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_11_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000040)
+#define RKISP1_CIF_ISP_HIST_BIN_12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000044)
+#define RKISP1_CIF_ISP_HIST_BIN_13_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000048)
+#define RKISP1_CIF_ISP_HIST_BIN_14_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_15_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000050)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000054)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000058)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000060)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000064)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000068)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006C)
#define RKISP1_CIF_ISP_FILT_BASE 0x00002500
#define RKISP1_CIF_ISP_FILT_MODE (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
@@ -1060,35 +1186,38 @@
#define RKISP1_CIF_ISP_EXP_BASE 0x00002600
#define RKISP1_CIF_ISP_EXP_CTRL (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_EXP_H_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_EXP_V_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_EXP_H_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_EXP_V_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_EXP_MEAN_00 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_EXP_MEAN_10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_EXP_MEAN_20 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
-#define RKISP1_CIF_ISP_EXP_MEAN_30 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_EXP_MEAN_40 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_EXP_MEAN_01 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_EXP_MEAN_11 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
-#define RKISP1_CIF_ISP_EXP_MEAN_21 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_EXP_MEAN_31 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_EXP_MEAN_41 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_EXP_MEAN_02 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
-#define RKISP1_CIF_ISP_EXP_MEAN_12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_EXP_MEAN_22 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_EXP_MEAN_32 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_EXP_MEAN_42 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
-#define RKISP1_CIF_ISP_EXP_MEAN_03 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_EXP_MEAN_13 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_EXP_MEAN_23 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_EXP_MEAN_33 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
-#define RKISP1_CIF_ISP_EXP_MEAN_43 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_EXP_MEAN_04 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_EXP_MEAN_14 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_EXP_MEAN_24 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
-#define RKISP1_CIF_ISP_EXP_MEAN_34 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
-#define RKISP1_CIF_ISP_EXP_MEAN_44 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_H_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_EXP_MEAN_00_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_EXP_MEAN_10_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_EXP_MEAN_20_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
+#define RKISP1_CIF_ISP_EXP_MEAN_30_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_EXP_MEAN_40_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_EXP_MEAN_01_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_EXP_MEAN_11_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_EXP_MEAN_21_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_EXP_MEAN_31_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_EXP_MEAN_41_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_EXP_MEAN_02_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_EXP_MEAN_12_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_EXP_MEAN_22_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_EXP_MEAN_32_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_EXP_MEAN_42_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
+#define RKISP1_CIF_ISP_EXP_MEAN_03_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_EXP_MEAN_13_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_EXP_MEAN_23_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_EXP_MEAN_33_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
+#define RKISP1_CIF_ISP_EXP_MEAN_43_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_EXP_MEAN_04_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_EXP_MEAN_14_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_EXP_MEAN_24_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_EXP_MEAN_34_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_EXP_MEAN_44_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_SIZE_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_OFFS_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_MEAN_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_BLS_BASE 0x00002700
#define RKISP1_CIF_ISP_BLS_CTRL (RKISP1_CIF_ISP_BLS_BASE + 0x00000000)
@@ -1249,6 +1378,16 @@
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002C00
+#define RKISP1_CIF_ISP_HIST_CTRL_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_SIZE_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_OFFS_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_DBG1_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_DBG2_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_DBG3_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000120)
+
#define RKISP1_CIF_ISP_VSM_BASE 0x00002F00
#define RKISP1_CIF_ISP_VSM_MODE (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
#define RKISP1_CIF_ISP_VSM_H_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
@@ -1260,4 +1399,7 @@
#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
+#define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000)
+
#endif /* _RKISP1_REGS_H */
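As a rough sketch of how the packed V12 exposure-mean words defined above are meant to be consumed (only the register and GET_MEAN_xy*_V12 macros come from this header; the helper name is illustrative):

#include <linux/types.h>

/* Unpack one 32-bit RKISP1_CIF_ISP_EXP_MEAN_V12 word into the four 8-bit
 * block luminance means it carries, using the macros defined above. */
static inline void rkisp1_example_unpack_exp_mean_v12(u32 reg_val, u8 means[4])
{
	means[0] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(reg_val); /* bits  7:0  */
	means[1] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(reg_val); /* bits 15:8  */
	means[2] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(reg_val); /* bits 23:16 */
	means[3] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(reg_val); /* bits 31:24 */
}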
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index e88bdd612d71..be5777c65bfb 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -174,18 +174,18 @@ rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
return vb2_queue_init(q);
}
-static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
/* Protect against concurrent access from ISR? */
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 reg_val;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V10);
pbuf->params.awb.awb_mean[0].cnt =
RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V10);
pbuf->params.awb.awb_mean[0].mean_cr_or_r =
RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
@@ -195,8 +195,29 @@ static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}
-static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ /* Protect against concurrent access from ISR? */
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 reg_val;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V12);
+ pbuf->params.awb.awb_mean[0].cnt =
+ RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V12);
+
+ pbuf->params.awb.awb_mean[0].mean_cr_or_r =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_cb_or_b =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_y_or_g =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
+}
+
+static void rkisp1_stats_get_aec_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
@@ -205,7 +226,31 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
+ RKISP1_CIF_ISP_EXP_MEAN_00_V10 + i * 4);
+}
+
+static void rkisp1_stats_get_aec_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V12 / 4; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 1] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 2] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 3] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(value);
+ }
+
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
}
static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
@@ -225,17 +270,34 @@ static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
}
-static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_hst_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
- u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0_V10 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN_V10(reg_val);
+ }
+}
- pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 / 2; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_V12 + i * 4);
+ pbuf->params.hist.hist_bins[2 * i] =
+ RKISP1_CIF_ISP_HIST_GET_BIN0_V12(value);
+ pbuf->params.hist.hist_bins[2 * i + 1] =
+ RKISP1_CIF_ISP_HIST_GET_BIN1_V12(value);
}
}
@@ -286,6 +348,18 @@ static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
}
}
+static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v10,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v10,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v10,
+};
+
+static const struct rkisp1_stats_ops rkisp1_v12_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v12,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v12,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v12,
+};
+
static void
rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
{
@@ -307,18 +381,18 @@ rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
cur_stat_buf = (struct rkisp1_stat_buffer *)
vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0);
if (isp_ris & RKISP1_CIF_ISP_AWB_DONE)
- rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
+ stats->ops->get_awb_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_AFM_FIN)
rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_EXP_END) {
- rkisp1_stats_get_aec_meas(stats, cur_stat_buf);
+ stats->ops->get_aec_meas(stats, cur_stat_buf);
rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
}
if (isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY)
- rkisp1_stats_get_hst_meas(stats, cur_stat_buf);
+ stats->ops->get_hst_meas(stats, cur_stat_buf);
vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
sizeof(struct rkisp1_stat_buffer));
@@ -352,6 +426,11 @@ static void rkisp1_init_stats(struct rkisp1_stats *stats)
V4L2_META_FMT_RK_ISP1_STAT_3A;
stats->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_stat_buffer);
+
+ if (stats->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ stats->ops = &rkisp1_v12_stats_ops;
+ else
+ stats->ops = &rkisp1_v10_stats_ops;
}
int rkisp1_stats_register(struct rkisp1_device *rkisp1)
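The v10/v12 split above boils down to a small function-pointer table selected once at init time. A minimal, self-contained sketch of that dispatch pattern, using simplified stand-in names rather than the driver's exact types:

struct stats_ops_example {
	void (*get_awb_meas)(void *priv, void *buf);
	void (*get_aec_meas)(void *priv, void *buf);
	void (*get_hst_meas)(void *priv, void *buf);
};

/* The real driver points these at the _v10/_v12 helpers shown above. */
static const struct stats_ops_example example_v10_ops;
static const struct stats_ops_example example_v12_ops;

/* Chosen once from the detected hardware revision, mirroring
 * rkisp1_init_stats(); '12' stands in for the RKISP1_V12 constant. */
static const struct stats_ops_example *
example_pick_stats_ops(unsigned int hw_revision)
{
	return hw_revision == 12 ? &example_v12_ops : &example_v10_ops;
}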
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index e1d51fd3e700..32892ab359ee 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -23,7 +23,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
@@ -402,7 +401,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
struct s3c_camif_plat_data *pdata = dev->platform_data;
struct s3c_camif_drvdata *drvdata;
struct camif_dev *camif;
- struct resource *mres;
int ret = 0;
camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
@@ -423,9 +421,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
drvdata = (void *)platform_get_device_id(pdev)->driver_data;
camif->variant = drvdata->variant;
- mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- camif->io_base = devm_ioremap_resource(dev, mres);
+ camif->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(camif->io_base))
return PTR_ERR(camif->io_base);
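The same mechanical conversion recurs in most of the probe functions that follow: devm_platform_ioremap_resource() looks up the IORESOURCE_MEM resource at the given index and ioremaps it in one call, so the local struct resource pointer disappears. A minimal sketch with a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Replaces platform_get_resource() + devm_ioremap_resource(). */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... device-specific setup using 'base' ... */
	return 0;
}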
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 1cb5eaabf340..fa0bb31bd2b9 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -635,9 +635,7 @@ static int g2d_probe(struct platform_device *pdev)
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 7d0ab19c38bb..ebdfd24e9cd5 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2850,7 +2850,6 @@ static void *jpeg_get_drv_data(struct device *dev);
static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
- struct resource *res;
int i, ret;
/* JPEG IP abstraction struct */
@@ -2867,9 +2866,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- jpeg->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->regs))
return PTR_ERR(jpeg->regs);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index eba2b9f040df..fc85e4e2d020 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1283,14 +1283,17 @@ static int s5p_mfc_probe(struct platform_device *pdev)
spin_lock_init(&dev->condlock);
dev->plat_dev = pdev;
if (!dev->plat_dev) {
- dev_err(&pdev->dev, "No platform data specified\n");
+ mfc_err("No platform data specified\n");
return -ENODEV;
}
dev->variant = of_device_get_match_data(&pdev->dev);
+ if (!dev->variant) {
+ dev_err(&pdev->dev, "Failed to get device MFC hardware variant information\n");
+ return -ENOENT;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 6413cd279125..7d467f2ba072 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1315,8 +1315,7 @@ static int bdisp_probe(struct platform_device *pdev)
mutex_init(&bdisp->lock);
/* get resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdisp->regs = devm_ioremap_resource(dev, res);
+ bdisp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdisp->regs)) {
ret = PTR_ERR(bdisp->regs);
goto err_wq;
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 338b205ae3a7..02dc78bd7fab 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -28,7 +28,6 @@
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pinctrl/pinctrl.h>
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index 0560a9cb004b..feb48cb546d7 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/version.h>
#include <dt-bindings/media/c8sectpfe.h>
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 30fb1aa4a351..15e8f83b1b56 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -298,15 +298,13 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
struct device *dev = &pdev->dev;
- struct resource *regs;
struct resource *esram;
int ret;
WARN_ON(!hva);
/* get memory for registers */
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hva->regs = devm_ioremap_resource(dev, regs);
+ hva->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hva->regs)) {
dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
return PTR_ERR(hva->regs);
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d914ccef9831..e1b17c05229c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -128,6 +128,7 @@ struct stm32_dcmi {
int sequence;
struct list_head buffers;
struct dcmi_buf *active;
+ int irq;
struct v4l2_device v4l2_dev;
struct video_device *vdev;
@@ -1759,6 +1760,14 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
+ ret = devm_request_threaded_irq(dcmi->dev, dcmi->irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(dcmi->dev), dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Unable to request irq %d\n", dcmi->irq);
+ return ret;
+ }
+
return 0;
}
@@ -1824,11 +1833,11 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
return -EINVAL;
}
- v4l2_async_notifier_init(&dcmi->notifier);
+ v4l2_async_nf_init(&dcmi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &dcmi->notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&dcmi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -1839,10 +1848,10 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.ops = &dcmi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ ret = v4l2_async_nf_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
dev_err(dcmi->dev, "Failed to register notifier\n");
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
return ret;
}
@@ -1914,6 +1923,8 @@ static int dcmi_probe(struct platform_device *pdev)
if (irq <= 0)
return irq ? irq : -ENXIO;
+ dcmi->irq = irq;
+
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!dcmi->res) {
dev_err(&pdev->dev, "Could not get resource\n");
@@ -1926,14 +1937,6 @@ static int dcmi_probe(struct platform_device *pdev)
return PTR_ERR(dcmi->regs);
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
- dcmi_irq_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), dcmi);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- return ret;
- }
-
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk)) {
if (PTR_ERR(mclk) != -EPROBE_DEFER)
@@ -2060,7 +2063,7 @@ static int dcmi_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
err_media_entity_cleanup:
media_entity_cleanup(&dcmi->vdev->entity);
err_device_release:
@@ -2080,8 +2083,8 @@ static int dcmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&dcmi->notifier);
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_unregister(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
media_entity_cleanup(&dcmi->vdev->entity);
v4l2_device_unregister(&dcmi->v4l2_dev);
media_device_cleanup(&dcmi->mdev);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 8d40a7acba9c..80a10f238bbe 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -122,7 +122,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -135,8 +135,8 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
csi->bus = vep.bus.parallel;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -154,7 +154,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct video_device *vdev;
struct sun4i_csi *csi;
- struct resource *res;
int ret;
int irq;
@@ -179,8 +178,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
media_device_init(&csi->mdev);
csi->v4l.mdev = &csi->mdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi->regs = devm_ioremap_resource(&pdev->dev, res);
+ csi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->regs))
return PTR_ERR(csi->regs);
@@ -244,7 +242,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_media;
- ret = v4l2_async_notifier_register(&csi->v4l, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l, &csi->notifier);
if (ret) {
dev_err(csi->dev, "Couldn't register our notifier.\n");
goto err_unregister_media;
@@ -268,8 +266,8 @@ static int sun4i_csi_remove(struct platform_device *pdev)
{
struct sun4i_csi *csi = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
vb2_video_unregister_device(&csi->vdev);
media_device_unregister(&csi->mdev);
sun4i_csi_dma_unregister(csi);
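These hunks, like several that follow, are a pure rename of the v4l2-async notifier API (v4l2_async_notifier_* becomes v4l2_async_nf_*, with unchanged semantics). A minimal lifecycle sketch using the new names; error handling is trimmed and the notifier ops assignment is omitted:

#include <linux/err.h>
#include <linux/fwnode.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

/* 'ep' is assumed to be a valid endpoint fwnode obtained by the caller. */
static int example_register_sensor(struct v4l2_device *v4l2_dev,
				   struct v4l2_async_notifier *notifier,
				   struct fwnode_handle *ep)
{
	struct v4l2_async_subdev *asd;
	int ret;

	v4l2_async_nf_init(notifier);

	asd = v4l2_async_nf_add_fwnode_remote(notifier, ep,
					      struct v4l2_async_subdev);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(notifier);
		return PTR_ERR(asd);
	}

	ret = v4l2_async_nf_register(v4l2_dev, notifier);
	if (ret)
		v4l2_async_nf_cleanup(notifier);

	return ret;
}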
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 27935f1e9555..fc96921b0583 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -61,7 +61,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
|| sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656)
&& sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) {
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -124,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
case V4L2_PIX_FMT_VYUY:
return (mbus_code == MEDIA_BUS_FMT_VYUY8_2X8);
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -269,7 +269,7 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
case V4L2_PIX_FMT_VYUY:
return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
return buf_interlaced ? CSI_FRAME_MB_YUV420 :
CSI_FIELD_MB_YUV420;
case V4L2_PIX_FMT_NV12:
@@ -311,7 +311,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
return 0;
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_YUV420:
@@ -526,7 +526,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
planar_offset[0] = 0;
switch (config->pixelformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -717,8 +717,8 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi)
{
media_device_unregister(&csi->media_dev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
sun6i_video_cleanup(&csi->video);
v4l2_device_unregister(&csi->v4l2_dev);
v4l2_ctrl_handler_free(&csi->ctrl_handler);
@@ -737,7 +737,7 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
"platform:%s", dev_name(csi->dev));
media_device_init(&csi->media_dev);
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0);
if (ret) {
@@ -759,16 +759,17 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
if (ret)
goto unreg_v4l2;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(csi->dev,
- &csi->notifier,
- sizeof(struct v4l2_async_subdev),
- sun6i_csi_fwnode_parse);
+ ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev,
+ &csi->notifier,
+ sizeof(struct v4l2_async_subdev),
+ sun6i_csi_fwnode_parse);
if (ret)
goto clean_video;
csi->notifier.ops = &sun6i_csi_async_ops;
- ret = v4l2_async_notifier_register(&csi->v4l2_dev, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
if (ret) {
dev_err(csi->dev, "notifier registration failed\n");
goto clean_video;
@@ -783,7 +784,7 @@ unreg_v4l2:
free_ctrl:
v4l2_ctrl_handler_free(&csi->ctrl_handler);
clean_media:
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
media_device_cleanup(&csi->media_dev);
return ret;
@@ -832,13 +833,11 @@ static const struct regmap_config sun6i_csi_regmap_config = {
static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
struct platform_device *pdev)
{
- struct resource *res;
void __iomem *io_base;
int ret;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(&pdev->dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
index c626821aaedb..3a38d107ae3f 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
@@ -105,7 +105,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat)
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_YUV420:
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 07b2161392d2..607a8d39fbe2 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -48,7 +48,7 @@ static const u32 supported_pixformats[] = {
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_UYVY,
V4L2_PIX_FMT_VYUY,
- V4L2_PIX_FMT_HM12,
+ V4L2_PIX_FMT_NV12_16L16,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_YUV420,
@@ -467,7 +467,7 @@ static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = {
static int sun6i_video_open(struct file *file)
{
struct sun6i_video *video = video_drvdata(file);
- int ret;
+ int ret = 0;
if (mutex_lock_interruptible(&video->lock))
return -ERESTARTSYS;
@@ -481,10 +481,8 @@ static int sun6i_video_open(struct file *file)
goto fh_release;
/* check if already powered */
- if (!v4l2_fh_is_singular_file(file)) {
- ret = -EBUSY;
+ if (!v4l2_fh_is_singular_file(file))
goto unlock;
- }
ret = sun6i_csi_set_power(video->csi, true);
if (ret < 0)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 671e4a928993..aa65d70b6270 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -803,7 +803,6 @@ static int deinterlace_probe(struct platform_device *pdev)
{
struct deinterlace_dev *dev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -825,8 +824,7 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, res);
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 8e469d518a74..4a4a6c5983f7 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -781,7 +781,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&cal->notifier);
+ v4l2_async_nf_init(&cal->notifier);
cal->notifier.ops = &cal_async_notifier_ops;
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
@@ -793,9 +793,9 @@ static int cal_async_notifier_register(struct cal_dev *cal)
continue;
fwnode = of_fwnode_handle(phy->source_node);
- casd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
- fwnode,
- struct cal_v4l2_async_subdev);
+ casd = v4l2_async_nf_add_fwnode(&cal->notifier,
+ fwnode,
+ struct cal_v4l2_async_subdev);
if (IS_ERR(casd)) {
phy_err(phy, "Failed to add subdev to notifier\n");
ret = PTR_ERR(casd);
@@ -805,7 +805,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
casd->phy = phy;
}
- ret = v4l2_async_notifier_register(&cal->v4l2_dev, &cal->notifier);
+ ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
if (ret) {
cal_err(cal, "Error registering async notifier\n");
goto error;
@@ -814,14 +814,14 @@ static int cal_async_notifier_register(struct cal_dev *cal)
return 0;
error:
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
return ret;
}
static void cal_async_notifier_unregister(struct cal_dev *cal)
{
- v4l2_async_notifier_unregister(&cal->notifier);
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_unregister(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
}
/* ------------------------------------------------------------------
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 3655573e8581..95483c84c3f2 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -132,11 +132,11 @@ static struct via_camera *via_cam_info;
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
- dev_err(&(cam)->platdev->dev, fmt, ##arg);
+ dev_err(&(cam)->platdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
- dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
- dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg)
/*
* Format handling. This is ripped almost directly from Hans's changes
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 905005e271ca..fda8fc0e4814 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -360,7 +360,7 @@ static int video_mux_async_register(struct video_mux *vmux,
unsigned int i;
int ret;
- v4l2_async_notifier_init(&vmux->notifier);
+ v4l2_async_nf_init(&vmux->notifier);
for (i = 0; i < num_input_pads; i++) {
struct v4l2_async_subdev *asd;
@@ -380,8 +380,8 @@ static int video_mux_async_register(struct video_mux *vmux,
}
fwnode_handle_put(remote_ep);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &vmux->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&vmux->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -395,8 +395,7 @@ static int video_mux_async_register(struct video_mux *vmux,
vmux->notifier.ops = &video_mux_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&vmux->subdev,
- &vmux->notifier);
+ ret = v4l2_async_subdev_nf_register(&vmux->subdev, &vmux->notifier);
if (ret)
return ret;
@@ -477,8 +476,8 @@ static int video_mux_probe(struct platform_device *pdev)
ret = video_mux_async_register(vmux, num_pads - 1);
if (ret) {
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
}
return ret;
@@ -489,8 +488,8 @@ static int video_mux_remove(struct platform_device *pdev)
struct video_mux *vmux = platform_get_drvdata(pdev);
struct v4l2_subdev *sd = &vmux->subdev;
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 06f74d410973..0c2507dc03d6 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -455,6 +455,10 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n",
__func__, BRX_NAME(pipe->brx));
+ /* If the DRM pipe does not have a UIF there is nothing we can update. */
+ if (!drm_pipe->uif)
+ return 0;
+
/*
* If the UIF is not in use schedule it for removal by setting its pipe
* pointer to NULL, vsp1_du_pipeline_configure() will remove it from the
@@ -462,9 +466,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
* make sure it is present in the pipeline's list of entities if it
* wasn't already.
*/
- if (drm_pipe->uif && !use_uif) {
+ if (!use_uif) {
drm_pipe->uif->pipe = NULL;
- } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
+ } else if (!drm_pipe->uif->pipe) {
drm_pipe->uif->pipe = pipe;
list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index de442d6c9926..c9044785b903 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -44,7 +44,7 @@
static irqreturn_t vsp1_irq_handler(int irq, void *data)
{
- u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
+ u32 mask = VI6_WPF_IRQ_STA_DFE | VI6_WPF_IRQ_STA_FRE;
struct vsp1_device *vsp1 = data;
irqreturn_t ret = IRQ_NONE;
unsigned int i;
@@ -59,7 +59,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
- if (status & VI6_WFP_IRQ_STA_DFE) {
+ if (status & VI6_WPF_IRQ_STA_DFE) {
vsp1_pipeline_frame_end(wpf->entity.pipe);
ret = IRQ_HANDLED;
}
@@ -777,6 +777,16 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uif_count = 2,
.wpf_count = 2,
.num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3U,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
},
};
@@ -785,7 +795,6 @@ static int vsp1_probe(struct platform_device *pdev)
struct vsp1_device *vsp1;
struct device_node *fcp_node;
struct resource *irq;
- struct resource *io;
unsigned int i;
int ret;
@@ -800,8 +809,7 @@ static int vsp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vsp1);
/* I/O and IRQ resources (clock managed by the clock PM domain). */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
+ vsp1->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index fe3130db1fa2..fae7286eb01e 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -32,12 +32,12 @@
#define VI6_STATUS_SYS_ACT(n) BIT((n) + 8)
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
-#define VI6_WFP_IRQ_ENB_DFEE BIT(1)
-#define VI6_WFP_IRQ_ENB_FREE BIT(0)
+#define VI6_WPF_IRQ_ENB_DFEE BIT(1)
+#define VI6_WPF_IRQ_ENB_FREE BIT(0)
#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
-#define VI6_WFP_IRQ_STA_DFE BIT(1)
-#define VI6_WFP_IRQ_STA_FRE BIT(0)
+#define VI6_WPF_IRQ_STA_DFE BIT(1)
+#define VI6_WPF_IRQ_STA_FRE BIT(0)
#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
#define VI6_DISP_IRQ_ENB_DSTE BIT(8)
@@ -766,6 +766,8 @@
#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
@@ -777,6 +779,7 @@
#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
/* -----------------------------------------------------------------------------
* RPF CLUT Registers
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 208498fa6ed7..94e91d7bb56c 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -342,7 +342,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
/* Enable interrupts. */
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
- VI6_WFP_IRQ_ENB_DFEE);
+ VI6_WPF_IRQ_ENB_DFEE);
/*
* Configure writeback for display pipelines (the wpf writeback flag is
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 425a32dd5d19..a0073122798f 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -205,10 +205,8 @@ EXPORT_SYMBOL_GPL(xvip_clr_and_set);
int xvip_init_resources(struct xvip_device *xvip)
{
struct platform_device *pdev = to_platform_device(xvip->dev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xvip->iomem = devm_ioremap_resource(xvip->dev, res);
+ xvip->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xvip->iomem))
return PTR_ERR(xvip->iomem);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 2ce31d7ce1a6..f34f8b077e03 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -382,9 +382,8 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
continue;
}
- xge = v4l2_async_notifier_add_fwnode_subdev(
- &xdev->notifier, remote,
- struct xvip_graph_entity);
+ xge = v4l2_async_nf_add_fwnode(&xdev->notifier, remote,
+ struct xvip_graph_entity);
fwnode_handle_put(remote);
if (IS_ERR(xge)) {
ret = PTR_ERR(xge);
@@ -395,7 +394,7 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
return 0;
err_notifier_cleanup:
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
fwnode_handle_put(ep);
return ret;
}
@@ -420,7 +419,7 @@ static int xvip_graph_parse(struct xvip_composite_device *xdev)
entity = to_xvip_entity(asd);
ret = xvip_graph_parse_one(xdev, entity->asd.match.fwnode);
if (ret < 0) {
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
break;
}
}
@@ -496,8 +495,8 @@ static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
struct xvip_dma *dmap;
struct xvip_dma *dma;
- v4l2_async_notifier_unregister(&xdev->notifier);
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_unregister(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) {
xvip_dma_cleanup(dma);
@@ -532,7 +531,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
/* Register the subdevices notifier. */
xdev->notifier.ops = &xvip_graph_notify_ops;
- ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ ret = v4l2_async_nf_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
dev_err(xdev->dev, "notifier registration failed\n");
goto done;
@@ -596,7 +595,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
xdev->dev = &pdev->dev;
INIT_LIST_HEAD(&xdev->dmas);
- v4l2_async_notifier_init(&xdev->notifier);
+ v4l2_async_nf_init(&xdev->notifier);
ret = xvip_composite_v4l2_init(xdev);
if (ret < 0)
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 112376873167..484046471c03 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1279,7 +1279,7 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->driver, WL1273_FM_DRIVER_NAME,
sizeof(capability->driver));
- strscpy(capability->card, "Texas Instruments Wl1273 FM Radio",
+ strscpy(capability->card, "TI Wl1273 FM Radio",
sizeof(capability->card));
strscpy(capability->bus_info, radio->bus_type,
sizeof(capability->bus_info));
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index f491420d7b53..a972c0705ac7 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -11,7 +11,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.2"
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index fedff68d8c49..3f8634a46573 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -16,7 +16,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.10"
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index fd5a7a058714..9506baf3c4c1 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -453,14 +453,6 @@ config IR_SERIAL_TRANSMITTER
help
Serial Port Transmitter support
-config IR_SIR
- tristate "Built-in SIR IrDA port"
- help
- Say Y if you want to use a IrDA SIR port Transceivers.
-
- To compile this driver as a module, choose M here: the module will
- be called sir-ir.
-
config RC_XBOX_DVD
tristate "Xbox DVD Movie Playback Kit"
depends on USB
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 0db51fad27d6..378d62d21e06 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
-obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
obj-$(CONFIG_IR_TOY) += ir_toy.o
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 094aa6a06315..6f8464872033 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -76,7 +76,6 @@ static void img_ir_ident(struct img_ir_priv *priv)
static int img_ir_probe(struct platform_device *pdev)
{
struct img_ir_priv *priv;
- struct resource *res_regs;
int irq, error, error2;
/* Get resources from platform device */
@@ -94,8 +93,7 @@ static int img_ir_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
/* Ioremap the registers */
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res_regs);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
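This probe cleanup, repeated below for ir-hix5hd2, meson-ir, mtk-cir, st_rc and sunxi-cir, replaces the explicit platform_get_resource() plus devm_ioremap_resource() pair with the devm_platform_ioremap_resource() helper, which performs the same two steps internally. A rough open-coded equivalent, for illustration only:

	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* Roughly what devm_platform_ioremap_resource(pdev, 0) does for the
	 * callers converted in this series; sketch only, not part of the patch. */
	static void __iomem *example_ioremap_first_mem(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}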
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 2ca4e86c7b9f..54da6f60079b 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2358,8 +2358,10 @@ urb_submit_failed:
touch_setup_failed:
find_endpoint_failed:
usb_put_dev(ictx->usbdev_intf1);
+ ictx->usbdev_intf1 = NULL;
mutex_unlock(&ictx->lock);
usb_free_urb(rx_urb);
+ ictx->rx_urb_intf1 = NULL;
rx_urb_alloc_failed:
dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 4609fb4519e9..e0be6471afe5 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -249,7 +249,6 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
{
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *of_id;
@@ -274,8 +273,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
priv->regmap = NULL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 48d52baec1a1..7e98e7e3aace 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -4,7 +4,9 @@
* Infrared Toy and IR Droid RC core driver
*
* Copyright (C) 2020 Sean Young <sean@mess.org>
-
+ *
+ * http://dangerousprototypes.com/docs/USB_IR_Toy:_Sampling_mode
+ *
* This driver is based on the lirc driver which can be found here:
* https://sourceforge.net/p/lirc/git/ci/master/tree/plugins/irtoy.c
* Copyright (C) 2011 Peter Kooiman <pkooiman@gmail.com>
@@ -46,7 +48,7 @@ static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
enum state {
STATE_IRDATA,
- STATE_RESET,
+ STATE_COMMAND_NO_RESP,
STATE_COMMAND,
STATE_TX,
};
@@ -121,6 +123,7 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
len, irtoy->in);
}
break;
+ case STATE_COMMAND_NO_RESP:
case STATE_IRDATA: {
struct ir_raw_event rawir = { .pulse = irtoy->pulse };
__be16 *in = (__be16 *)irtoy->in;
@@ -166,10 +169,8 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
int err;
if (len != 1 || space > MAX_PACKET || space == 0) {
- dev_err(irtoy->dev, "packet length expected: %*phN\n",
+ dev_dbg(irtoy->dev, "packet length expected: %*phN\n",
len, irtoy->in);
- irtoy->state = STATE_IRDATA;
- complete(&irtoy->command_done);
break;
}
@@ -193,9 +194,6 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
irtoy->tx_len -= buf_len;
}
break;
- case STATE_RESET:
- dev_err(irtoy->dev, "unexpected response to reset: %*phN\n",
- len, irtoy->in);
}
}
@@ -204,7 +202,7 @@ static void irtoy_out_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
if (urb->status == 0) {
- if (irtoy->state == STATE_RESET)
+ if (irtoy->state == STATE_COMMAND_NO_RESP)
complete(&irtoy->command_done);
} else {
dev_warn(irtoy->dev, "out urb status: %d\n", urb->status);
@@ -216,10 +214,20 @@ static void irtoy_in_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
int ret;
- if (urb->status == 0)
+ switch (urb->status) {
+ case 0:
irtoy_response(irtoy, urb->actual_length);
- else
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ case -EPIPE:
+ usb_unlink_urb(urb);
+ return;
+ default:
dev_dbg(irtoy->dev, "in urb status: %d\n", urb->status);
+ }
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -ENODEV)
@@ -256,7 +264,7 @@ static int irtoy_setup(struct irtoy *irtoy)
int err;
err = irtoy_command(irtoy, COMMAND_RESET, sizeof(COMMAND_RESET),
- STATE_RESET);
+ STATE_COMMAND_NO_RESP);
if (err != 0) {
dev_err(irtoy->dev, "could not write reset command: %d\n",
err);
@@ -310,7 +318,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
buf[i] = cpu_to_be16(v);
}
- buf[count] = 0xffff;
+ buf[count] = cpu_to_be16(0xffff);
irtoy->tx_buf = buf;
irtoy->tx_len = size;
@@ -321,7 +329,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
// with its led on. It does not respond to any command when this
// happens. To work around this, re-enter sample mode.
err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
- sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+ sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
return err;
@@ -357,6 +365,27 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
return count;
}
+static int irtoy_tx_carrier(struct rc_dev *rc, uint32_t carrier)
+{
+ struct irtoy *irtoy = rc->priv;
+ u8 buf[3];
+ int err;
+
+ if (carrier < 11800)
+ return -EINVAL;
+
+ buf[0] = 0x06;
+ buf[1] = DIV_ROUND_CLOSEST(48000000, 16 * carrier) - 1;
+ buf[2] = 0;
+
+ err = irtoy_command(irtoy, buf, sizeof(buf), STATE_COMMAND_NO_RESP);
+ if (err)
+ dev_err(irtoy->dev, "could not write carrier command: %d\n",
+ err);
+
+ return err;
+}
+
static int irtoy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -436,8 +465,9 @@ static int irtoy_probe(struct usb_interface *intf,
if (err)
goto free_rcdev;
- dev_info(irtoy->dev, "version: hardware %u, firmware %u, protocol %u",
- irtoy->hw_version, irtoy->sw_version, irtoy->proto_version);
+ dev_info(irtoy->dev, "version: hardware %u, firmware %u.%u, protocol %u",
+ irtoy->hw_version, irtoy->sw_version / 10,
+ irtoy->sw_version % 10, irtoy->proto_version);
if (irtoy->sw_version < MIN_FW_VERSION) {
dev_err(irtoy->dev, "need firmware V%02u or higher",
@@ -455,6 +485,7 @@ static int irtoy_probe(struct usb_interface *intf,
rc->dev.parent = &intf->dev;
rc->priv = irtoy;
rc->tx_ir = irtoy_tx;
+ rc->s_tx_carrier = irtoy_tx_carrier;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->map_name = RC_MAP_RC6_MCE;
rc->rx_resolution = UNIT_US;
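The new irtoy_tx_carrier() hook programs the transmit carrier by sending command byte 0x06 followed by a one-byte PWM divider. Judging from the DIV_ROUND_CLOSEST(48000000, 16 * carrier) - 1 expression, the device derives the carrier from a 48 MHz clock with a /16 prescaler; the sketch below works that through for a common 38 kHz carrier and shows why requests below 11800 Hz are rejected (the divider reaches 253 there and would overflow the single byte written to buf[1] only slightly lower). Illustrative only, not part of the patch:

	#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST */
	#include <linux/types.h>

	/* Sketch only: carrier produced by a given divider byte under the
	 * 48 MHz / prescaler-16 assumption implied by irtoy_tx_carrier(). */
	static unsigned int irtoy_carrier_from_div(u8 div)
	{
		return 48000000U / (16U * (div + 1U));
	}

	/*
	 * Example: a 38000 Hz request gives
	 *	DIV_ROUND_CLOSEST(48000000, 16 * 38000) - 1 = 79 - 1 = 78,
	 * and irtoy_carrier_from_div(78) is roughly 37975 Hz.
	 */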
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 5bc23e8c6d91..4f77d4ebacdc 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -242,7 +242,7 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
}
/* check for the receive interrupt */
- if (iflags & ITE_IRQ_RX_FIFO) {
+ if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
/* read the FIFO bytes */
rx_bytes = dev->params->get_rx_bytes(dev, rx_buf,
ITE_RX_FIFO_LEN);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index e03dd1f0144f..d09bee82c04c 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1386,6 +1386,7 @@ static void mceusb_dev_recv(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -EILSEQ:
+ case -EPROTO:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
@@ -1612,6 +1613,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rc->rx_resolution = MCE_TIME_UNIT;
rc->min_timeout = MCE_TIME_UNIT;
rc->timeout = MS_TO_US(100);
if (!mceusb_model[ir->model].broken_irtimeout) {
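The ir_toy change above (the new switch in irtoy_in_callback()) and the mceusb change (adding -EPROTO to the existing case list) both follow the common USB completion idiom: statuses that indicate the URB was unlinked or the device is gone end the cycle without resubmission, anything else is treated as transient and the URB is resubmitted. A minimal sketch of that idiom, not taken from either driver:

	#include <linux/usb.h>

	/* Sketch of the completion-status idiom used in both hunks above. */
	static void example_in_callback(struct urb *urb)
	{
		switch (urb->status) {
		case 0:
			/* consume urb->transfer_buffer / urb->actual_length */
			break;
		case -ECONNRESET:
		case -ENOENT:
		case -ESHUTDOWN:
		case -EPROTO:
			usb_unlink_urb(urb);
			return;		/* final: do not resubmit */
		default:
			break;		/* transient: fall through and resubmit */
		}
		usb_submit_urb(urb, GFP_ATOMIC);
	}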
diff --git a/drivers/media/rc/meson-ir-tx.c b/drivers/media/rc/meson-ir-tx.c
index 3055f8e1b6ff..c22cd26a5c07 100644
--- a/drivers/media/rc/meson-ir-tx.c
+++ b/drivers/media/rc/meson-ir-tx.c
@@ -395,7 +395,6 @@ static struct platform_driver meson_irtx_pd = {
.remove = meson_irtx_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = meson_irtx_dt_match,
},
};
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index dad55950dfc6..4b769111f78e 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -102,7 +102,6 @@ static int meson_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
const char *map_name;
struct meson_ir *ir;
int irq, ret;
@@ -111,8 +110,7 @@ static int meson_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->reg = devm_ioremap_resource(dev, res);
+ ir->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 65a136c0fac2..840e7aec5c21 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -292,7 +292,6 @@ static int mtk_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
- struct resource *res;
struct mtk_ir *ir;
u32 val;
int ret = 0;
@@ -320,8 +319,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
ir->bus = ir->clk;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base))
return PTR_ERR(ir->base);
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
deleted file mode 100644
index 6ec96dc34586..000000000000
--- a/drivers/media/rc/sir_ir.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * IR SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
- *
- * sir_ir - Device driver for use with SIR (serial infra red)
- * mode of IrDA on many notebooks.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-
-#include <media/rc-core.h>
-
-/* SECTION: Definitions */
-#define PULSE '['
-
-/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/
-#define TIME_CONST (9000000ul / 115200ul)
-
-/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
-#define SIR_TIMEOUT (HZ * 5 / 100)
-
-/* onboard sir ports are typically com3 */
-static int io = 0x3e8;
-static int irq = 4;
-static int threshold = 3;
-
-static DEFINE_SPINLOCK(timer_lock);
-static struct timer_list timerlist;
-/* time of last signal change detected */
-static ktime_t last;
-/* time of last UART data ready interrupt */
-static ktime_t last_intr_time;
-static int last_value;
-static struct rc_dev *rcdev;
-
-static struct platform_device *sir_ir_dev;
-
-static DEFINE_SPINLOCK(hardware_lock);
-
-/* SECTION: Prototypes */
-
-/* Communication with user-space */
-static void add_read_queue(int flag, unsigned long val);
-/* Hardware */
-static irqreturn_t sir_interrupt(int irq, void *dev_id);
-static void send_space(unsigned long len);
-static void send_pulse(unsigned long len);
-static int init_hardware(void);
-static void drop_hardware(void);
-/* Initialisation */
-
-static inline unsigned int sinp(int offset)
-{
- return inb(io + offset);
-}
-
-static inline void soutp(int offset, int value)
-{
- outb(value, io + offset);
-}
-
-/* SECTION: Communication with user-space */
-static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
- unsigned int count)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < count;) {
- if (tx_buf[i])
- send_pulse(tx_buf[i]);
- i++;
- if (i >= count)
- break;
- if (tx_buf[i])
- send_space(tx_buf[i]);
- i++;
- }
- local_irq_restore(flags);
-
- return count;
-}
-
-static void add_read_queue(int flag, unsigned long val)
-{
- struct ir_raw_event ev = {};
-
- pr_debug("add flag %d with val %lu\n", flag, val);
-
- /*
- * statistically, pulses are ~TIME_CONST/2 too long. we could
- * maybe make this more exact, but this is good enough
- */
- if (flag) {
- /* pulse */
- if (val > TIME_CONST / 2)
- val -= TIME_CONST / 2;
- else /* should not ever happen */
- val = 1;
- ev.pulse = true;
- } else {
- val += TIME_CONST / 2;
- }
- ev.duration = val;
-
- ir_raw_event_store_with_filter(rcdev, &ev);
-}
-
-/* SECTION: Hardware */
-static void sir_timeout(struct timer_list *unused)
-{
- /*
- * if last received signal was a pulse, but receiving stopped
- * within the 9 bit frame, we need to finish this pulse and
- * simulate a signal change to from pulse to space. Otherwise
- * upper layers will receive two sequences next time.
- */
-
- unsigned long flags;
- unsigned long pulse_end;
-
- /* avoid interference with interrupt */
- spin_lock_irqsave(&timer_lock, flags);
- if (last_value) {
- /* clear unread bits in UART and restart */
- outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
- /* determine 'virtual' pulse end: */
- pulse_end = min_t(unsigned long,
- ktime_us_delta(last, last_intr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
- add_read_queue(last_value, pulse_end);
- last_value = 0;
- last = last_intr_time;
- }
- spin_unlock_irqrestore(&timer_lock, flags);
- ir_raw_event_handle(rcdev);
-}
-
-static irqreturn_t sir_interrupt(int irq, void *dev_id)
-{
- unsigned char data;
- ktime_t curr_time;
- unsigned long delt;
- unsigned long deltintr;
- unsigned long flags;
- int counter = 0;
- int iir, lsr;
-
- while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
- if (++counter > 256) {
- dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
- break;
- }
-
- switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */
- case UART_IIR_MSI:
- (void)inb(io + UART_MSR);
- break;
- case UART_IIR_RLSI:
- case UART_IIR_THRI:
- (void)inb(io + UART_LSR);
- break;
- case UART_IIR_RDI:
- /* avoid interference with timer */
- spin_lock_irqsave(&timer_lock, flags);
- do {
- del_timer(&timerlist);
- data = inb(io + UART_RX);
- curr_time = ktime_get();
- delt = min_t(unsigned long,
- ktime_us_delta(last, curr_time),
- IR_MAX_DURATION);
- deltintr = min_t(unsigned long,
- ktime_us_delta(last_intr_time,
- curr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
- deltintr, (int)data);
- /*
- * if nothing came in last X cycles,
- * it was gap
- */
- if (deltintr > TIME_CONST * threshold) {
- if (last_value) {
- dev_dbg(&sir_ir_dev->dev, "GAP\n");
- /* simulate signal change */
- add_read_queue(last_value,
- delt -
- deltintr);
- last_value = 0;
- last = last_intr_time;
- delt = deltintr;
- }
- }
- data = 1;
- if (data ^ last_value) {
- /*
- * deltintr > 2*TIME_CONST, remember?
- * the other case is timeout
- */
- add_read_queue(last_value,
- delt - TIME_CONST);
- last_value = data;
- last = curr_time;
- last = ktime_sub_us(last,
- TIME_CONST);
- }
- last_intr_time = curr_time;
- if (data) {
- /*
- * start timer for end of
- * sequence detection
- */
- timerlist.expires = jiffies +
- SIR_TIMEOUT;
- add_timer(&timerlist);
- }
-
- lsr = inb(io + UART_LSR);
- } while (lsr & UART_LSR_DR); /* data ready */
- spin_unlock_irqrestore(&timer_lock, flags);
- break;
- default:
- break;
- }
- }
- ir_raw_event_handle(rcdev);
- return IRQ_RETVAL(IRQ_HANDLED);
-}
-
-static void send_space(unsigned long len)
-{
- usleep_range(len, len + 25);
-}
-
-static void send_pulse(unsigned long len)
-{
- long bytes_out = len / TIME_CONST;
-
- if (bytes_out == 0)
- bytes_out++;
-
- while (bytes_out--) {
- outb(PULSE, io + UART_TX);
- /* FIXME treba seriozne cakanie z char/serial.c */
- while (!(inb(io + UART_LSR) & UART_LSR_THRE))
- ;
- }
-}
-
-static int init_hardware(void)
-{
- u8 scratch, scratch2, scratch3;
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/tty/serial/8250/8250_port.c
- */
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- spin_unlock_irqrestore(&hardware_lock, flags);
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
- /* reset UART */
- outb(0, io + UART_MCR);
- outb(0, io + UART_IER);
- /* init UART */
- /* set DLAB, speed = 115200 */
- outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
- outb(1, io + UART_DLL); outb(0, io + UART_DLM);
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
- outb(UART_LCR_WLEN7, io + UART_LCR);
- /* FIFO operation */
- outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
- /* interrupts */
- /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
- outb(UART_IER_RDI, io + UART_IER);
- /* turn on UART */
- outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
- spin_unlock_irqrestore(&hardware_lock, flags);
-
- return 0;
-}
-
-static void drop_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /* turn off interrupts */
- outb(0, io + UART_IER);
-
- spin_unlock_irqrestore(&hardware_lock, flags);
-}
-
-/* SECTION: Initialisation */
-static int sir_ir_probe(struct platform_device *dev)
-{
- int retval;
-
- rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->device_name = "SIR IrDA port";
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->tx_ir = sir_tx_ir;
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->dev.parent = &sir_ir_dev->dev;
-
- timer_setup(&timerlist, sir_timeout, 0);
-
- /* get I/O port access and IRQ line */
- if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
- pr_err("i/o port 0x%.4x already in use.\n", io);
- return -EBUSY;
- }
- retval = devm_request_irq(&sir_ir_dev->dev, irq, sir_interrupt, 0,
- KBUILD_MODNAME, NULL);
- if (retval < 0) {
- pr_err("IRQ %d already in use.\n", irq);
- return retval;
- }
-
- retval = init_hardware();
- if (retval) {
- del_timer_sync(&timerlist);
- return retval;
- }
-
- pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
-
- retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static int sir_ir_remove(struct platform_device *dev)
-{
- drop_hardware();
- del_timer_sync(&timerlist);
- return 0;
-}
-
-static struct platform_driver sir_ir_driver = {
- .probe = sir_ir_probe,
- .remove = sir_ir_remove,
- .driver = {
- .name = "sir_ir",
- },
-};
-
-static int __init sir_ir_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&sir_ir_driver);
- if (retval)
- return retval;
-
- sir_ir_dev = platform_device_alloc("sir_ir", 0);
- if (!sir_ir_dev) {
- retval = -ENOMEM;
- goto pdev_alloc_fail;
- }
-
- retval = platform_device_add(sir_ir_dev);
- if (retval)
- goto pdev_add_fail;
-
- return 0;
-
-pdev_add_fail:
- platform_device_put(sir_ir_dev);
-pdev_alloc_fail:
- platform_driver_unregister(&sir_ir_driver);
- return retval;
-}
-
-static void __exit sir_ir_exit(void)
-{
- platform_device_unregister(sir_ir_dev);
- platform_driver_unregister(&sir_ir_driver);
-}
-
-module_init(sir_ir_init);
-module_exit(sir_ir_exit);
-
-MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
-MODULE_AUTHOR("Milan Pikula");
-MODULE_LICENSE("GPL");
-
-module_param_hw(io, int, ioport, 0444);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-module_param_hw(irq, int, irq, 0444);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(threshold, int, 0444);
-MODULE_PARM_DESC(threshold, "space detection threshold (3)");
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index d79d1e3996b2..4e419dbbacd3 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -231,7 +231,6 @@ static int st_rc_probe(struct platform_device *pdev)
int ret = -EINVAL;
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct st_rc_device *rc_dev;
struct device_node *np = pdev->dev.of_node;
const char *rx_mode;
@@ -274,9 +273,7 @@ static int st_rc_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rc_dev->base = devm_ioremap_resource(dev, res);
+ rc_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc_dev->base)) {
ret = PTR_ERR(rc_dev->base);
goto err;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 9cd765e31c49..1cc5ebb85b6c 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -293,6 +293,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
+ rdev->rx_resolution = SZ_RESOLUTION;
ret = rc_register_device(rdev);
if (ret < 0) {
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 168e1d2c876a..391a591c1b75 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -255,7 +255,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
const struct sunxi_ir_quirks *quirks;
- struct resource *res;
struct sunxi_ir *ir;
u32 b_clk_freq = SUNXI_IR_BASE_CLK;
@@ -301,8 +300,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
dev_dbg(dev, "set base clock frequency to %d Hz.\n", b_clk_freq);
/* IO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base)) {
return PTR_ERR(ir->base);
}
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index b91a1e845b97..506f52c1af10 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -618,7 +618,7 @@ fail_frontend:
fail_attach:
dvb_unregister_adapter(&dvb_spi->adapter);
fail_adapter:
- if (!dvb_spi->vcc_supply)
+ if (dvb_spi->vcc_supply)
regulator_disable(dvb_spi->vcc_supply);
fail_regulator:
kfree(dvb_spi);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 75617709c8ce..82620613d56b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -564,6 +564,10 @@ static int vidtv_bridge_remove(struct platform_device *pdev)
static void vidtv_bridge_dev_release(struct device *dev)
{
+ struct vidtv_dvb *dvb;
+
+ dvb = dev_get_drvdata(dev);
+ kfree(dvb);
}
static struct platform_device vidtv_bridge_dev = {
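The previously empty release callback now frees the vidtv_dvb state retrieved via dev_get_drvdata(), so the allocation made at probe time is released when the last reference on the platform device is dropped instead of leaking. A hypothetical sketch of the probe side this pairs with, assuming (as the dev_get_drvdata() call suggests) that probe stores the state with platform_set_drvdata(); not part of the patch:

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	/* Hypothetical probe-side counterpart of the release callback above. */
	static int example_probe(struct platform_device *pdev)
	{
		struct vidtv_dvb *dvb = kzalloc(sizeof(*dvb), GFP_KERNEL);

		if (!dvb)
			return -ENOMEM;
		platform_set_drvdata(pdev, dvb);	/* freed in the release callback */
		return 0;
	}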
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index d714fe50afe5..47575490e74a 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -12,11 +12,6 @@
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 06880dd0b6ac..820b8f5b502f 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-rect.h>
@@ -13,11 +14,11 @@
#include "vimc-common.h"
-static unsigned int sca_mult = 3;
-module_param(sca_mult, uint, 0000);
-MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-
-#define MAX_ZOOM 8
+/* Pad identifier */
+enum vic_sca_pad {
+ VIMC_SCA_SINK = 0,
+ VIMC_SCA_SRC = 1,
+};
#define VIMC_SCA_FMT_WIDTH_DEFAULT 640
#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
@@ -25,19 +26,16 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- /* NOTE: the source fmt is the same as the sink
- * with the width and hight multiplied by mult
- */
- struct v4l2_mbus_framefmt sink_fmt;
struct v4l2_rect crop_rect;
+ /* Frame format for both sink and src pad */
+ struct v4l2_mbus_framefmt fmt[2];
/* Values calculated when the stream starts */
u8 *src_frame;
- unsigned int src_line_size;
unsigned int bpp;
struct media_pad pads[2];
};
-static const struct v4l2_mbus_framefmt sink_fmt_default = {
+static const struct v4l2_mbus_framefmt fmt_default = {
.width = VIMC_SCA_FMT_WIDTH_DEFAULT,
.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
@@ -72,17 +70,6 @@ vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
return r;
}
-static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
- const struct v4l2_mbus_framefmt *sink_fmt)
-{
- const struct v4l2_rect sink_rect =
- vimc_sca_get_crop_bound_sink(sink_fmt);
-
- /* Disallow rectangles smaller than the minimal one. */
- v4l2_rect_set_min_size(r, &crop_rect_min);
- v4l2_rect_map_inside(r, &sink_rect);
-}
-
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
@@ -90,19 +77,14 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_rect *r;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
- *mf = sink_fmt_default;
-
- r = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- *r = crop_rect_default;
-
- for (i = 1; i < sd->entity.num_pads; i++) {
+ for (i = 0; i < sd->entity.num_pads; i++) {
mf = v4l2_subdev_get_try_format(sd, sd_state, i);
- *mf = sink_fmt_default;
- mf->width = mf->width * sca_mult;
- mf->height = mf->height * sca_mult;
+ *mf = fmt_default;
}
+ r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCA_SINK);
+ *r = crop_rect_default;
+
return 0;
}
@@ -144,112 +126,108 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (VIMC_IS_SINK(fse->pad)) {
- fse->max_width = VIMC_FRAME_MAX_WIDTH;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT;
- } else {
- fse->max_width = VIMC_FRAME_MAX_WIDTH * MAX_ZOOM;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT * MAX_ZOOM;
- }
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
return 0;
}
+static struct v4l2_mbus_framefmt *
+vimc_sca_pad_format(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state, u32 pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&vsca->sd, sd_state, pad);
+ else
+ return &vsca->fmt[pad];
+}
+
+static struct v4l2_rect *
+vimc_sca_pad_crop(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&vsca->sd, sd_state,
+ VIMC_SCA_SINK);
+ else
+ return &vsca->crop_rect;
+}
+
static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_rect *crop_rect;
-
- /* Get the current sink format */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- } else {
- format->format = vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- }
-
- /* Scale the frame size for the source pad */
- if (VIMC_IS_SRC(format->pad)) {
- format->format.width = crop_rect->width * sca_mult;
- format->format.height = crop_rect->height * sca_mult;
- }
+ format->format = *vimc_sca_pad_format(vsca, sd_state, format->pad,
+ format->which);
return 0;
}
-static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
-{
- const struct vimc_pix_map *vpix;
-
- /* Only accept code in the pix map table in non bayer format */
- vpix = vimc_pix_map_by_code(fmt->code);
- if (!vpix || vpix->bayer)
- fmt->code = sink_fmt_default.code;
-
- fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
- VIMC_FRAME_MAX_WIDTH) & ~1;
- fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
- VIMC_FRAME_MAX_HEIGHT) & ~1;
-
- if (fmt->field == V4L2_FIELD_ANY)
- fmt->field = sink_fmt_default.field;
-
- vimc_colorimetry_clamp(fmt);
-}
-
static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
+ struct v4l2_mbus_framefmt *fmt;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ /* Do not change the active format while stream is on */
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ fmt = vimc_sca_pad_format(vsca, sd_state, format->pad, format->which);
+
+ /*
+ * The media bus code and colorspace can only be changed on the sink
+ * pad, the source pad only follows.
+ */
+ if (format->pad == VIMC_SCA_SINK) {
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format. */
+ vpix = vimc_pix_map_by_code(format->format.code);
+ if (vpix && !vpix->bayer)
+ fmt->code = format->format.code;
+ else
+ fmt->code = fmt_default.code;
+
+ /* Clamp the colorspace to valid values. */
+ fmt->colorspace = format->format.colorspace;
+ fmt->ycbcr_enc = format->format.ycbcr_enc;
+ fmt->quantization = format->format.quantization;
+ fmt->xfer_func = format->format.xfer_func;
+ vimc_colorimetry_clamp(fmt);
}
+ /* Clamp and align the width and height */
+ fmt->width = clamp_t(u32, format->format.width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, format->format.height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
/*
- * Do not change the format of the source pad,
- * it is propagated from the sink
+ * Propagate the sink pad format to the crop rectangle and the source
+ * pad.
*/
- if (VIMC_IS_SRC(fmt->pad)) {
- fmt->format = *sink_fmt;
- fmt->format.width = crop_rect->width * sca_mult;
- fmt->format.height = crop_rect->height * sca_mult;
- } else {
- /* Set the new format in the sink pad */
- vimc_sca_adjust_sink_fmt(&fmt->format);
-
- dev_dbg(vsca->ved.dev, "%s: sink format update: "
- "old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
- /* old */
- sink_fmt->width, sink_fmt->height, sink_fmt->code,
- sink_fmt->colorspace, sink_fmt->quantization,
- sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
- /* new */
- fmt->format.width, fmt->format.height, fmt->format.code,
- fmt->format.colorspace, fmt->format.quantization,
- fmt->format.xfer_func, fmt->format.ycbcr_enc);
-
- *sink_fmt = fmt->format;
-
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(crop_rect, sink_fmt);
+ if (format->pad == VIMC_SCA_SINK) {
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *crop;
+
+ crop = vimc_sca_pad_crop(vsca, sd_state, format->which);
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+ crop->top = 0;
+ crop->left = 0;
+
+ src_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SRC,
+ format->which);
+ *src_fmt = *fmt;
}
+ format->format = *fmt;
+
return 0;
}
@@ -259,24 +237,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
if (VIMC_IS_SRC(sel->pad))
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- }
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *crop_rect;
+ sel->r = *vimc_sca_pad_crop(vsca, sd_state, sel->which);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
break;
default:
@@ -286,6 +257,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
return 0;
}
+static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+ const struct v4l2_mbus_framefmt *sink_fmt)
+{
+ const struct v4l2_rect sink_rect =
+ vimc_sca_get_crop_bound_sink(sink_fmt);
+
+ /* Disallow rectangles smaller than the minimal one. */
+ v4l2_rect_set_min_size(r, &crop_rect_min);
+ v4l2_rect_map_inside(r, &sink_rect);
+}
+
static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
@@ -294,30 +276,18 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *crop_rect;
- if (VIMC_IS_SRC(sel->pad))
+ /* Only support setting the crop of the sink pad */
+ if (VIMC_IS_SRC(sel->pad) || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- crop_rect = &vsca->crop_rect;
- sink_fmt = &vsca->sink_fmt;
- } else {
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
- *crop_rect = sel->r;
- break;
- default:
- return -EINVAL;
- }
+ crop_rect = vimc_sca_pad_crop(vsca, sd_state, sel->which);
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
+ vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+ *crop_rect = sel->r;
return 0;
}
@@ -344,16 +314,12 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vpix = vimc_pix_map_by_code(vsca->fmt[VIMC_SCA_SINK].code);
vsca->bpp = vpix->bpp;
- /* Calculate the width in bytes of the src frame */
- vsca->src_line_size = vsca->crop_rect.width *
- sca_mult * vsca->bpp;
-
/* Calculate the frame size of the source pad */
- frame_size = vsca->src_line_size * vsca->crop_rect.height *
- sca_mult;
+ frame_size = vsca->fmt[VIMC_SCA_SRC].width
+ * vsca->fmt[VIMC_SCA_SRC].height * vsca->bpp;
/* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
@@ -382,77 +348,32 @@ static const struct v4l2_subdev_ops vimc_sca_ops = {
.video = &vimc_sca_video_ops,
};
-static void vimc_sca_fill_pix(u8 *const ptr,
- const u8 *const pixel,
- const unsigned int bpp)
-{
- unsigned int i;
-
- /* copy the pixel to the pointer */
- for (i = 0; i < bpp; i++)
- ptr[i] = pixel[i];
-}
-
-static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
- unsigned int lin, unsigned int col,
- const u8 *const sink_frame)
-{
- const struct v4l2_rect crop_rect = vsca->crop_rect;
- unsigned int i, j, index;
- const u8 *pixel;
-
- /* Point to the pixel value in position (lin, col) in the sink frame */
- index = VIMC_FRAME_INDEX(lin, col,
- vsca->sink_fmt.width,
- vsca->bpp);
- pixel = &sink_frame[index];
-
- dev_dbg(vsca->ved.dev,
- "sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
- vsca->sd.name, lin, col, index);
-
- /* point to the place we are going to put the first pixel
- * in the scaled src frame
- */
- lin -= crop_rect.top;
- col -= crop_rect.left;
- index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
- crop_rect.width * sca_mult, vsca->bpp);
-
- dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
- vsca->sd.name, lin * sca_mult, col * sca_mult, index);
-
- /* Repeat this pixel mult times */
- for (i = 0; i < sca_mult; i++) {
- /* Iterate through each beginning of a
- * pixel repetition in a line
- */
- for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->ved.dev,
- "sca: %s: sca: scale_pix src pos %d\n",
- vsca->sd.name, index + j);
-
- /* copy the pixel to the position index + j */
- vimc_sca_fill_pix(&vsca->src_frame[index + j],
- pixel, vsca->bpp);
- }
-
- /* move the index to the next line */
- index += vsca->src_line_size;
- }
-}
-
static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
const u8 *const sink_frame)
{
- const struct v4l2_rect r = vsca->crop_rect;
- unsigned int i, j;
-
- /* Scale each pixel from the original sink frame */
- /* TODO: implement scale down, only scale up is supported for now */
- for (i = r.top; i < r.top + r.height; i++)
- for (j = r.left; j < r.left + r.width; j++)
- vimc_sca_scale_pix(vsca, i, j, sink_frame);
+ const struct v4l2_mbus_framefmt *src_fmt = &vsca->fmt[VIMC_SCA_SRC];
+ const struct v4l2_rect *r = &vsca->crop_rect;
+ unsigned int snk_width = vsca->fmt[VIMC_SCA_SINK].width;
+ unsigned int src_x, src_y;
+ u8 *walker = vsca->src_frame;
+
+ /* Set each pixel at the src_frame to its sink_frame equivalent */
+ for (src_y = 0; src_y < src_fmt->height; src_y++) {
+ unsigned int snk_y, y_offset;
+
+ snk_y = (src_y * r->height) / src_fmt->height + r->top;
+ y_offset = snk_y * snk_width * vsca->bpp;
+
+ for (src_x = 0; src_x < src_fmt->width; src_x++) {
+ unsigned int snk_x, x_offset, index;
+
+ snk_x = (src_x * r->width) / src_fmt->width + r->left;
+ x_offset = snk_x * vsca->bpp;
+ index = y_offset + x_offset;
+ memcpy(walker, &sink_frame[index], vsca->bpp);
+ walker += vsca->bpp;
+ }
+ }
}
static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
@@ -492,8 +413,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
return ERR_PTR(-ENOMEM);
/* Initialize ved and sd */
- vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
- vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ vsca->pads[VIMC_SCA_SINK].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[VIMC_SCA_SRC].flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
vcfg_name,
@@ -508,7 +429,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
vsca->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
- vsca->sink_fmt = sink_fmt_default;
+ vsca->fmt[VIMC_SCA_SINK] = fmt_default;
+ vsca->fmt[VIMC_SCA_SRC] = fmt_default;
/* Initialize the crop selection */
vsca->crop_rect = crop_rect_default;
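The scaler rewrite above drops the fixed sca_mult pixel replication (and its module parameter) in favour of independent sink and source pad formats: vimc_sca_fill_src_frame() now maps every source pixel back into the sink crop rectangle with an integer nearest-neighbour ratio, which covers both up- and down-scaling. The mapping used inside the loop, pulled out for illustration only:

	/* Sketch only: sink coordinate sampled for a given source coordinate
	 * in the new vimc_sca_fill_src_frame() loop (nearest neighbour). */
	static unsigned int sink_coord(unsigned int src, unsigned int src_len,
				       unsigned int crop_len, unsigned int crop_off)
	{
		return (src * crop_len) / src_len + crop_off;
	}

	/* Example: scaling a 640-wide crop up to a 1280-wide source frame maps
	 * src_x = 641 to sink_x = (641 * 640) / 1280 + left = 320 + left. */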
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 55ea039fe5b2..1f7469ff04d5 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -5,40 +5,23 @@
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/delay.h>
#include <media/cec.h>
#include "vivid-core.h"
#include "vivid-cec.h"
-#define CEC_TIM_START_BIT_TOTAL 4500
-#define CEC_TIM_START_BIT_LOW 3700
-#define CEC_TIM_START_BIT_HIGH 800
-#define CEC_TIM_DATA_BIT_TOTAL 2400
-#define CEC_TIM_DATA_BIT_0_LOW 1500
-#define CEC_TIM_DATA_BIT_0_HIGH 900
-#define CEC_TIM_DATA_BIT_1_LOW 600
-#define CEC_TIM_DATA_BIT_1_HIGH 1800
+#define CEC_START_BIT_US 4500
+#define CEC_DATA_BIT_US 2400
+#define CEC_MARGIN_US 350
-void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
- spin_lock(&dev->cec_slock);
- while (!list_empty(&dev->cec_work_list)) {
- struct vivid_cec_work *cw =
- list_first_entry(&dev->cec_work_list,
- struct vivid_cec_work, list);
-
- spin_unlock(&dev->cec_slock);
- cancel_delayed_work_sync(&cw->work);
- spin_lock(&dev->cec_slock);
- list_del(&cw->list);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE);
- kfree(cw);
- }
- spin_unlock(&dev->cec_slock);
-}
+struct xfer_on_bus {
+ struct cec_adapter *adap;
+ u8 status;
+};
-static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
- struct cec_adapter *adap, u8 dest)
+static bool find_dest_adap(struct vivid_dev *dev,
+ struct cec_adapter *adap, u8 dest)
{
unsigned int i;
@@ -61,116 +44,187 @@ static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
return false;
}
-static void vivid_cec_pin_adap_events(struct cec_adapter *adap, ktime_t ts,
- const struct cec_msg *msg, bool nacked)
+static bool xfer_ready(struct vivid_dev *dev)
{
- unsigned int len = nacked ? 1 : msg->len;
unsigned int i;
- bool bit;
-
- if (adap == NULL)
- return;
+ bool ready = false;
- /*
- * Suffix ULL on constant 10 makes the expression
- * CEC_TIM_START_BIT_TOTAL + 10ULL * len * CEC_TIM_DATA_BIT_TOTAL
- * to be evaluated using 64-bit unsigned arithmetic (u64), which
- * is what ktime_sub_us expects as second argument.
- */
- ts = ktime_sub_us(ts, CEC_TIM_START_BIT_TOTAL +
- 10ULL * len * CEC_TIM_DATA_BIT_TOTAL);
- cec_queue_pin_cec_event(adap, false, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_HIGH);
-
- for (i = 0; i < 10 * len; i++) {
- switch (i % 10) {
- case 0 ... 7:
- bit = msg->msg[i / 10] & (0x80 >> (i % 10));
- break;
- case 8: /* EOM */
- bit = i / 10 == msg->len - 1;
- break;
- case 9: /* ACK */
- bit = cec_msg_is_broadcast(msg) ^ nacked;
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft &&
+ dev->xfers[i].sft <= dev->cec_sft) {
+ ready = true;
break;
}
- cec_queue_pin_cec_event(adap, false, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_LOW);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_HIGH);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_HIGH);
}
-}
+ spin_unlock(&dev->cec_xfers_slock);
-static void vivid_cec_pin_events(struct vivid_dev *dev,
- const struct cec_msg *msg, bool nacked)
-{
- ktime_t ts = ktime_get();
- unsigned int i;
-
- vivid_cec_pin_adap_events(dev->cec_rx_adap, ts, msg, nacked);
- for (i = 0; i < MAX_OUTPUTS; i++)
- vivid_cec_pin_adap_events(dev->cec_tx_adap[i], ts, msg, nacked);
+ return ready;
}
-static void vivid_cec_xfer_done_worker(struct work_struct *work)
+/*
+ * If an adapter tries to send successive messages, it must wait for the
+ * longest signal-free time between its transmissions. But, if another
+ * adapter sends a message in the interim, then the wait can be reduced
+ * because the messages are no longer successive. Make these adjustments
+ * if necessary. Should be called holding cec_xfers_slock.
+ */
+static void adjust_sfts(struct vivid_dev *dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
- struct cec_adapter *adap = cw->adap;
- u8 dest = cec_msg_destination(&cw->msg);
- bool valid_dest;
unsigned int i;
+ u8 initiator;
- valid_dest = cec_msg_is_broadcast(&cw->msg);
- if (!valid_dest)
- valid_dest = vivid_cec_find_dest_adap(dev, adap, dest);
-
- cw->tx_status = valid_dest ? CEC_TX_STATUS_OK : CEC_TX_STATUS_NACK;
- spin_lock(&dev->cec_slock);
- dev->cec_xfer_time_jiffies = 0;
- dev->cec_xfer_start_jiffies = 0;
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- vivid_cec_pin_events(dev, &cw->msg, !valid_dest);
- cec_transmit_attempt_done(cw->adap, cw->tx_status);
-
- /* Broadcast message */
- if (adap != dev->cec_rx_adap)
- cec_received_msg(dev->cec_rx_adap, &cw->msg);
- for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
- if (adap != dev->cec_tx_adap[i])
- cec_received_msg(dev->cec_tx_adap[i], &cw->msg);
- kfree(cw);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft <= CEC_SIGNAL_FREE_TIME_RETRY)
+ continue;
+ initiator = dev->xfers[i].msg[0] >> 4;
+ if (initiator == dev->last_initiator)
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ }
}
-static void vivid_cec_xfer_try_worker(struct work_struct *work)
+/*
+ * The main emulation of the bus on which CEC adapters attempt to send
+ * messages to each other. The bus keeps track of how long it has been
+ * signal-free and accepts a pending transmission only if the state of
+ * the bus matches the transmission's signal-free requirements. It calls
+ * cec_transmit_attempt_done() for all transmits that enter the bus and
+ * cec_received_msg() for successful transmits.
+ */
+int vivid_cec_bus_thread(void *_dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
-
- spin_lock(&dev->cec_slock);
- if (dev->cec_xfer_time_jiffies) {
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_ARB_LOST);
- kfree(cw);
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies);
+ u32 last_sft;
+ unsigned int i;
+ unsigned int dest;
+ ktime_t start, end;
+ s64 delta_us, retry_us;
+ struct vivid_dev *dev = _dev;
+
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ for (;;) {
+ bool first = true;
+ int wait_xfer_us = 0;
+ bool valid_dest = false;
+ int wait_arb_lost_us = 0;
+ unsigned int first_idx = 0;
+ unsigned int first_status = 0;
+ struct cec_msg first_msg = {};
+ struct xfer_on_bus xfers_on_bus[MAX_OUTPUTS] = {};
+
+ wait_event_interruptible(dev->kthread_waitq_cec, xfer_ready(dev) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+ last_sft = dev->cec_sft;
+ dev->cec_sft = 0;
+ /*
+ * Move the messages that are ready onto the bus. The adapter with
+ * the most leading zeros will win control of the bus and any other
+ * adapters will lose arbitration.
+ */
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (!dev->xfers[i].sft || dev->xfers[i].sft > last_sft)
+ continue;
+ if (first) {
+ first = false;
+ first_idx = i;
+ xfers_on_bus[first_idx].adap = dev->xfers[i].adap;
+ memcpy(first_msg.msg, dev->xfers[i].msg, dev->xfers[i].len);
+ first_msg.len = dev->xfers[i].len;
+ } else {
+ xfers_on_bus[i].adap = dev->xfers[i].adap;
+ xfers_on_bus[i].status = CEC_TX_STATUS_ARB_LOST;
+ /*
+ * For simplicity wait for all 4 bits of the initiator's
+ * address even though HDMI specification uses bit-level
+ * precision.
+ */
+ wait_arb_lost_us = 4 * CEC_DATA_BIT_US + CEC_START_BIT_US;
+ }
+ dev->xfers[i].sft = 0;
+ }
+ dev->last_initiator = cec_msg_initiator(&first_msg);
+ adjust_sfts(dev);
+ spin_unlock(&dev->cec_xfers_slock);
+
+ dest = cec_msg_destination(&first_msg);
+ valid_dest = cec_msg_is_broadcast(&first_msg);
+ if (!valid_dest)
+ valid_dest = find_dest_adap(dev, xfers_on_bus[first_idx].adap, dest);
+ if (valid_dest) {
+ first_status = CEC_TX_STATUS_OK;
+ /*
+ * Message length is in bytes, but each byte is transmitted in
+ * a block of 10 bits.
+ */
+ wait_xfer_us = first_msg.len * 10 * CEC_DATA_BIT_US;
+ } else {
+ first_status = CEC_TX_STATUS_NACK;
+ /*
+ * A message that is not acknowledged stops transmitting after
+ * the header block of 10 bits.
+ */
+ wait_xfer_us = 10 * CEC_DATA_BIT_US;
+ }
+ wait_xfer_us += CEC_START_BIT_US;
+ xfers_on_bus[first_idx].status = first_status;
+
+ /* Sleep as if sending messages on a real hardware bus. */
+ start = ktime_get();
+ if (wait_arb_lost_us) {
+ usleep_range(wait_arb_lost_us - CEC_MARGIN_US, wait_arb_lost_us);
+ for (i = 0; i < ARRAY_SIZE(xfers_on_bus); i++) {
+ if (xfers_on_bus[i].status != CEC_TX_STATUS_ARB_LOST)
+ continue;
+ cec_transmit_attempt_done(xfers_on_bus[i].adap,
+ CEC_TX_STATUS_ARB_LOST);
+ }
+ if (kthread_should_stop())
+ break;
+ }
+ wait_xfer_us -= wait_arb_lost_us;
+ usleep_range(wait_xfer_us - CEC_MARGIN_US, wait_xfer_us);
+ cec_transmit_attempt_done(xfers_on_bus[first_idx].adap, first_status);
+ if (kthread_should_stop())
+ break;
+ if (first_status == CEC_TX_STATUS_OK) {
+ if (xfers_on_bus[first_idx].adap != dev->cec_rx_adap)
+ cec_received_msg(dev->cec_rx_adap, &first_msg);
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ if (xfers_on_bus[first_idx].adap != dev->cec_tx_adap[i])
+ cec_received_msg(dev->cec_tx_adap[i], &first_msg);
+ }
+ end = ktime_get();
+ /*
+ * If the emulated transfer took more or less time than it should
+ * have, then compensate by adjusting the wait time needed for the
+ * bus to be signal-free for 3 bit periods (the retry time).
+ */
+ delta_us = div_s64(end - start, 1000);
+ delta_us -= wait_xfer_us + wait_arb_lost_us;
+ retry_us = CEC_SIGNAL_FREE_TIME_RETRY * CEC_DATA_BIT_US - delta_us;
+ if (retry_us > CEC_MARGIN_US)
+ usleep_range(retry_us - CEC_MARGIN_US, retry_us);
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ /*
+ * If there are no messages that need to be retried, check if any
+ * adapters that did not just transmit a message are ready to
+ * transmit. If none of these adapters are ready, then increase
+ * the signal-free time so that the bus is available to all
+ * adapters and go back to waiting for a transmission.
+ */
+ while (dev->cec_sft >= CEC_SIGNAL_FREE_TIME_RETRY &&
+ dev->cec_sft < CEC_SIGNAL_FREE_TIME_NEXT_XFER &&
+ !xfer_ready(dev) && !kthread_should_stop()) {
+ usleep_range(2 * CEC_DATA_BIT_US - CEC_MARGIN_US,
+ 2 * CEC_DATA_BIT_US);
+ dev->cec_sft += 2;
+ }
}
+ return 0;
}
static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -184,41 +238,26 @@ static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-/*
- * One data bit takes 2400 us, each byte needs 10 bits so that's 24000 us
- * per byte.
- */
-#define USECS_PER_BYTE 24000
-
static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vivid_dev *dev = cec_get_drvdata(adap);
- struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
- long delta_jiffies = 0;
-
- if (cw == NULL)
- return -ENOMEM;
- cw->dev = dev;
- cw->adap = adap;
- cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
- msg->len * USECS_PER_BYTE;
- cw->msg = *msg;
-
- spin_lock(&dev->cec_slock);
- list_add(&cw->list, &dev->cec_work_list);
- if (dev->cec_xfer_time_jiffies == 0) {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- delta_jiffies = dev->cec_xfer_time_jiffies;
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
- delta_jiffies = dev->cec_xfer_start_jiffies +
- dev->cec_xfer_time_jiffies - jiffies;
+ u8 idx = cec_msg_initiator(msg);
+
+ spin_lock(&dev->cec_xfers_slock);
+ dev->xfers[idx].adap = adap;
+ memcpy(dev->xfers[idx].msg, msg->msg, CEC_MAX_MSG_SIZE);
+ dev->xfers[idx].len = msg->len;
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ if (signal_free_time > CEC_SIGNAL_FREE_TIME_RETRY) {
+ if (idx == dev->last_initiator)
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
}
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
+ spin_unlock(&dev->cec_xfers_slock);
+ wake_up_interruptible(&dev->kthread_waitq_cec);
+
return 0;
}
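The vivid CEC rework replaces the per-message delayed work with one bus thread that models signal-free time (cec_sft) in CEC bit periods. Each pending transfer records how long the bus must have been idle before it may enter, xfer_ready() admits it only once that time has elapsed, and adjust_sfts() relaxes the requirement when a different initiator transmitted in between. The selection rule applied in vivid_cec_adap_transmit() and adjust_sfts(), restated as a small helper for illustration (the CEC_SIGNAL_FREE_TIME_* values of 3, 5 and 7 bit periods come from the CEC framework headers):

	#include <media/cec.h>

	/* Sketch only: required signal-free time, in bit periods, for a
	 * queued vivid CEC transfer as chosen in the code above. */
	static u32 required_sft(bool is_retry, u8 initiator, u8 last_initiator)
	{
		if (is_retry)
			return CEC_SIGNAL_FREE_TIME_RETRY;		/* 3 */
		if (initiator == last_initiator)
			return CEC_SIGNAL_FREE_TIME_NEXT_XFER;		/* 7 */
		return CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;	/* 5 */
	}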
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.h b/drivers/media/test-drivers/vivid/vivid-cec.h
index 7524ed48a914..b2bcddb50b83 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.h
+++ b/drivers/media/test-drivers/vivid/vivid-cec.h
@@ -9,12 +9,5 @@
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source);
-void vivid_cec_bus_free_work(struct vivid_dev *dev);
-
-#else
-
-static inline void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
-}
-
+int vivid_cec_bus_thread(void *_dev);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index d2bd2653cf54..04b75666bad4 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -177,6 +177,15 @@ MODULE_PARM_DESC(cache_hints, " user-space cache hints, default is 0.\n"
"\t\t 0 == forbid\n"
"\t\t 1 == allow");
+static unsigned int supports_requests[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 1
+};
+module_param_array(supports_requests, uint, NULL, 0444);
+MODULE_PARM_DESC(supports_requests, " support for requests, default is 1.\n"
+ "\t\t 0 == no support\n"
+ "\t\t 1 == supports requests\n"
+ "\t\t 2 == requires requests");
+
static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
const struct v4l2_rect vivid_min_rect = {
@@ -883,10 +892,11 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
&vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = min_buffers_needed;
+ q->min_buffers_needed = supports_requests[dev->inst] ? 0 : min_buffers_needed;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
+ q->supports_requests = supports_requests[dev->inst];
+ q->requires_requests = supports_requests[dev->inst] >= 2;
q->allow_cache_hints = (cache_hints[dev->inst] == 1);
return vb2_queue_init(q);
@@ -1878,18 +1888,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->touch_cap_active);
- INIT_LIST_HEAD(&dev->cec_work_list);
- spin_lock_init(&dev->cec_slock);
- /*
- * Same as create_singlethread_workqueue, but now I can use the
- * string formatting of alloc_ordered_workqueue.
- */
- dev->cec_workqueue = alloc_ordered_workqueue("vivid-%03d-cec",
- WQ_MEM_RECLAIM, inst);
- if (!dev->cec_workqueue) {
- ret = -ENOMEM;
- goto unreg_dev;
- }
+ spin_lock_init(&dev->cec_xfers_slock);
if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -1929,6 +1928,19 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
cec_tx_bus_cnt++;
}
}
+
+ if (dev->cec_rx_adap || cec_tx_bus_cnt) {
+ init_waitqueue_head(&dev->kthread_waitq_cec);
+ dev->kthread_cec = kthread_run(vivid_cec_bus_thread, dev,
+ "vivid_cec-%s", dev->v4l2_dev.name);
+ if (IS_ERR(dev->kthread_cec)) {
+ ret = PTR_ERR(dev->kthread_cec);
+ dev->kthread_cec = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ goto unreg_dev;
+ }
+ }
+
#endif
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
@@ -1968,10 +1980,8 @@ unreg_dev:
cec_unregister_adapter(dev->cec_rx_adap);
for (i = 0; i < MAX_OUTPUTS; i++)
cec_unregister_adapter(dev->cec_tx_adap[i]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
free_dev:
v4l2_device_put(&dev->v4l2_dev);
return ret;
@@ -2093,10 +2103,8 @@ static int vivid_remove(struct platform_device *pdev)
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
v4l2_device_put(&dev->v4l2_dev);
vivid_devs[i] = NULL;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 1e3c4f5a9413..45f96706edde 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -110,15 +110,11 @@ enum vivid_colorspace {
#define VIVID_INVALID_SIGNAL(mode) \
((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
-struct vivid_cec_work {
- struct list_head list;
- struct delayed_work work;
+struct vivid_cec_xfer {
struct cec_adapter *adap;
- struct vivid_dev *dev;
- unsigned int usecs;
- unsigned int timeout_ms;
- u8 tx_status;
- struct cec_msg msg;
+ u8 msg[CEC_MAX_MSG_SIZE];
+ u32 len;
+ u32 sft;
};
struct vivid_dev {
@@ -560,12 +556,13 @@ struct vivid_dev {
/* CEC */
struct cec_adapter *cec_rx_adap;
struct cec_adapter *cec_tx_adap[MAX_OUTPUTS];
- struct workqueue_struct *cec_workqueue;
- spinlock_t cec_slock;
- struct list_head cec_work_list;
- unsigned int cec_xfer_time_jiffies;
- unsigned long cec_xfer_start_jiffies;
u8 cec_output2bus_map[MAX_OUTPUTS];
+ struct task_struct *kthread_cec;
+ wait_queue_head_t kthread_waitq_cec;
+ struct vivid_cec_xfer xfers[MAX_OUTPUTS];
+ spinlock_t cec_xfers_slock; /* read and write cec messages */
+ u32 cec_sft; /* bus signal free time, in bit periods */
+ u8 last_initiator;
/* CEC OSD String */
char osd[14];
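
The header change replaces the per-message delayed-work list with a single per-device kthread that sleeps on a waitqueue and drains pending CEC transfers. A minimal sketch of that general pattern, with hypothetical names (it is not the vivid implementation itself):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct mydev {
	struct task_struct *kthread;
	wait_queue_head_t waitq;
	bool have_xfer;		/* protected by a spinlock in real code */
};

static int mydev_xfer_thread(void *priv)
{
	struct mydev *dev = priv;

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->waitq,
					 dev->have_xfer || kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* process the pending transfer here */
		dev->have_xfer = false;
	}
	return 0;
}

static int mydev_start(struct mydev *dev)
{
	init_waitqueue_head(&dev->waitq);
	dev->kthread = kthread_run(mydev_xfer_thread, dev, "mydev-xfer");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);
	return 0;
}

static void mydev_stop(struct mydev *dev)
{
	if (dev->kthread)
		kthread_stop(dev->kthread);	/* also wakes the sleeping thread */
}
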
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 26a277975cb1..03c46a62bf26 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -172,7 +172,6 @@ static void set_reg_bits(struct reg_pair_t *reg_pair, u8 reg, u8 mask, u8 val)
i++;
}
- return;
}
static void copy_reg_bits(struct reg_pair_t *reg_pair1,
@@ -193,7 +192,6 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1,
}
i++;
}
- return;
}
/* ------------------------------------------------------------------------- */
@@ -221,7 +219,6 @@ static void mxl5007t_set_mode_bits(struct mxl5007t_state *state,
default:
mxl_fail(-EINVAL);
}
- return;
}
static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
@@ -274,8 +271,6 @@ static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00);
state->if_freq = if_freq;
-
- return;
}
static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
@@ -343,8 +338,6 @@ static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
mxl_fail(-EINVAL);
return;
}
-
- return;
}
static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
@@ -398,8 +391,6 @@ static void mxl5007t_set_bw_bits(struct mxl5007t_state *state,
return;
}
set_reg_bits(state->tab_rftune, 0x0c, 0x3f, val);
-
- return;
}
static struct
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index 01f61ebabd56..0ed2c5bc082e 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -1942,6 +1942,10 @@ struct tunertype tuners[] = {
.params = tuner_sony_btf_pg463z_params,
.count = ARRAY_SIZE(tuner_sony_btf_pg463z_params),
},
+ [TUNER_SI2157] = {
+ .name = "Silicon Labs Si2157 tuner",
+ /* see si2157.c for details */
+ },
};
EXPORT_SYMBOL(tuners);
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 7a81be7970b2..d568452618d1 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -415,8 +415,11 @@ static int airspy_alloc_urbs(struct airspy *s)
dev_dbg(s->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(s->urb_list[j]);
+ s->urb_list[j] = NULL;
+ }
+ s->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(s->urb_list[i],
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 7865fa0a8295..cd5861a30b6f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -931,8 +931,6 @@ static int mxl111sf_init(struct dvb_usb_device *d)
.len = sizeof(eeprom), .buf = eeprom },
};
- mutex_init(&state->msg_lock);
-
ret = get_chip_info(state);
if (mxl_fail(ret))
pr_err("failed to get chip info during probe");
@@ -1074,6 +1072,14 @@ static int mxl111sf_get_stream_config_dvbt(struct dvb_frontend *fe,
return 0;
}
+static int mxl111sf_probe(struct dvb_usb_device *dev)
+{
+ struct mxl111sf_state *state = d_to_priv(dev);
+
+ mutex_init(&state->msg_lock);
+ return 0;
+}
+
static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
@@ -1083,6 +1089,7 @@ static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_dvbt,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1124,6 +1131,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1165,6 +1173,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1233,6 +1242,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1311,6 +1321,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1381,6 +1392,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury_mh,
.tuner_attach = mxl111sf_attach_tuner,
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 1c39b61cde29..86788771175b 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -391,6 +391,7 @@ static struct rc_map_table rc_map_az6027_table[] = {
/* remote control stuff (does not work with my box) */
static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
+ *state = REMOTE_NO_KEY_PRESSED;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 02b51d1a1b67..aff60c10cb0b 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,7 +223,7 @@ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
u8 *buf;
int rc;
- buf = kmalloc(2, GFP_KERNEL);
+ buf = kzalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index c1e0dccb7408..b207f34af5c6 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2508,12 +2508,17 @@ const struct em28xx_board em28xx_boards[] = {
.def_i2c_bus = 1,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = TUNER_SI2157,
.tuner_gpio = hauppauge_dualhd_dvb,
.has_dvb = 1,
.has_dual_ts = 1,
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
},
/*
* 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
@@ -4139,8 +4144,11 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
em28xx_close_extension(dev);
- if (dev->dev_next)
+ if (dev->dev_next) {
+ em28xx_close_extension(dev->dev_next);
em28xx_release_resources(dev->dev_next);
+ }
+
em28xx_release_resources(dev);
if (dev->dev_next) {
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 584fa400cd7d..acc0bf7dbe2b 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -1154,8 +1154,9 @@ int em28xx_suspend_extension(struct em28xx *dev)
dev_info(&dev->intf->dev, "Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
- if (ops->suspend)
- ops->suspend(dev);
+ if (!ops->suspend)
+ continue;
+ ops->suspend(dev);
if (dev->dev_next)
ops->suspend(dev->dev_next);
}
diff --git a/drivers/media/usb/gspca/gl860/gl860-mi1320.c b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
index 0749fe13160f..d6a540ed378c 100644
--- a/drivers/media/usb/gspca/gl860/gl860-mi1320.c
+++ b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
@@ -50,42 +50,69 @@ static struct validx tbl_post_unset_alt[] = {
};
static u8 *tbl_1280[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x00" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x04\xf1\x00\xa1\x05\xf1\x00" "\xa4\x04\xf1\x00\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x28\xd5\x01\xd0\x02" "\xd1\x18\xd2\xc1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x00,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x04, 0xf1, 0x00, 0xa1, 0x05, 0xf1, 0x00,
+ 0xa4, 0x04, 0xf1, 0x00, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x28, 0xd5, 0x01, 0xd0, 0x02,
+ 0xd1, 0x18, 0xd2, 0xc1
+ }
};
static u8 *tbl_800[] = {
- "\x0d\x80\xf1\x08\x03\x03\xf1\xc0" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x03\xf1\xc0\xa1\x03\xf1\x20" "\xa4\x02\xf1\x5a\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x18\xd5\x21\xd0\x02" "\xd1\x10\xd2\x59"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x03, 0xf1, 0xc0,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x03, 0xf1, 0xc0, 0xa1, 0x03, 0xf1, 0x20,
+ 0xa4, 0x02, 0xf1, 0x5a, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x18, 0xd5, 0x21, 0xd0, 0x02,
+ 0xd1, 0x10, 0xd2, 0x59
+ }
};
static u8 *tbl_640[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x04" "\x04\x05\xf1\x02\x07\x01\xf1\x7c"
- "\x08\x00\xf1\x0e\x21\x80\xf1\x00" "\x0d\x00\xf1\x08\xf0\x00\xf1\x01"
- "\x34\x10\xf1\x10\x3a\x43\xf1\x00" "\xa6\x05\xf1\x02\xa9\x04\xf1\x04"
- "\xa7\x02\xf1\x81\xaa\x01\xf1\xe2" "\xae\x0c\xf1\x09"
- ,
- "\xf0\x00\xf1\x02\x39\x03\xf1\xfc" "\x3b\x04\xf1\x04\x57\x01\xf1\xb6"
- "\x58\x02\xf1\x0d\x5c\x1f\xf1\x19" "\x5d\x24\xf1\x1e\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\x00\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x10\xd5\x81\xd0\x02" "\xd1\x08\xd2\xe1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x04,
+ 0x04, 0x05, 0xf1, 0x02, 0x07, 0x01, 0xf1, 0x7c,
+ 0x08, 0x00, 0xf1, 0x0e, 0x21, 0x80, 0xf1, 0x00,
+ 0x0d, 0x00, 0xf1, 0x08, 0xf0, 0x00, 0xf1, 0x01,
+ 0x34, 0x10, 0xf1, 0x10, 0x3a, 0x43, 0xf1, 0x00,
+ 0xa6, 0x05, 0xf1, 0x02, 0xa9, 0x04, 0xf1, 0x04,
+ 0xa7, 0x02, 0xf1, 0x81, 0xaa, 0x01, 0xf1, 0xe2,
+ 0xae, 0x0c, 0xf1, 0x09
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x39, 0x03, 0xf1, 0xfc,
+ 0x3b, 0x04, 0xf1, 0x04, 0x57, 0x01, 0xf1, 0xb6,
+ 0x58, 0x02, 0xf1, 0x0d, 0x5c, 0x1f, 0xf1, 0x19,
+ 0x5d, 0x24, 0xf1, 0x1e, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0x00, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x10, 0xd5, 0x81, 0xd0, 0x02,
+ 0xd1, 0x08, 0xd2, 0xe1
+ }
};
static s32 tbl_sat[] = {0x25, 0x1d, 0x15, 0x0d, 0x05, 0x4d, 0x55, 0x5d, 0x2d};
diff --git a/drivers/media/usb/gspca/gl860/gl860-ov9655.c b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
index 59b87d066187..766677ebcb34 100644
--- a/drivers/media/usb/gspca/gl860/gl860-ov9655.c
+++ b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
@@ -25,69 +25,118 @@ static struct validx tbl_commmon[] = {
static s32 tbl_length[] = {12, 56, 52, 54, 56, 42, 32, 12};
static u8 *tbl_640[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x03\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x01\x12\x60\x13\x00" "\x14\x3a\x16\x24\x17\x14\x18\x00"
- "\x19\x01\x1a\x3d\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xff\x33\x00\x34\x3d\x35\x00" "\x36\xfa\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc1" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xee\x4b\xe7\x4c\xe7"
- "\x4d\xe7\x4e\xe7"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x0a\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x15\x71\x78\x72\x00\x73\x00" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x8d\x90\x7c\x91\x7b"
- ,
- "\x9d\x02\x9e\x02\x9f\x74\xa0\x73" "\xa1\x40\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x4f\xbd\x4e\xbe\x6a" "\xbf\x68\xc0\xaa\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x01\xd1\x08\xd2\xe0\xd3\x01" "\xd4\x10\xd5\x80"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ }, (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x03, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x01, 0x12, 0x60, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x14, 0x18, 0x00,
+ 0x19, 0x01, 0x1a, 0x3d, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ }, (u8[]){
+ 0x32, 0xff, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xfa, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc1,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xee, 0x4b, 0xe7, 0x4c, 0xe7,
+ 0x4d, 0xe7, 0x4e, 0xe7
+ }, (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x0a, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ }, (u8[]){
+ 0x70, 0x15, 0x71, 0x78, 0x72, 0x00, 0x73, 0x00,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x8d, 0x90, 0x7c, 0x91, 0x7b
+ }, (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x74, 0xa0, 0x73,
+ 0xa1, 0x40, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ }, (u8[]){
+ 0xbb, 0xae, 0xbc, 0x4f, 0xbd, 0x4e, 0xbe, 0x6a,
+ 0xbf, 0x68, 0xc0, 0xaa, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ }, (u8[]){
+ 0xd0, 0x01, 0xd1, 0x08, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x10, 0xd5, 0x80
+ }
};
static u8 *tbl_1280[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x01\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x00\x12\x00\x13\x00" "\x14\x3a\x16\x24\x17\x1b\x18\xbb"
- "\x19\x01\x1a\x81\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xa4\x33\x00\x34\x3d\x35\x00" "\x36\xf8\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc2" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xec\x4b\xe8\x4c\xe8"
- "\x4d\xe8\x4e\xe8"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x02\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x08\x71\x78\x72\x00\x73\x01" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x0d\x90\x90\x91\x90"
- ,
- "\x9d\x02\x9e\x02\x9f\x94\xa0\x94" "\xa1\x01\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x38\xbd\x39\xbe\x01" "\xbf\x01\xc0\xe2\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x21\xd1\x18\xd2\xe0\xd3\x01" "\xd4\x28\xd5\x00"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ },
+ (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x01, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x1b, 0x18, 0xbb,
+ 0x19, 0x01, 0x1a, 0x81, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ },
+ (u8[]){
+ 0x32, 0xa4, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xf8, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc2,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xec, 0x4b, 0xe8, 0x4c, 0xe8,
+ 0x4d, 0xe8, 0x4e, 0xe8
+ },
+ (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x02, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ },
+ (u8[]){
+ 0x70, 0x08, 0x71, 0x78, 0x72, 0x00, 0x73, 0x01,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x0d, 0x90, 0x90, 0x91, 0x90
+ },
+ (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x94, 0xa0, 0x94,
+ 0xa1, 0x01, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ },
+ (u8[]){
+ 0xbb, 0xae, 0xbc, 0x38, 0xbd, 0x39, 0xbe, 0x01,
+ 0xbf, 0x01, 0xc0, 0xe2, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ },
+ (u8[]){
+ 0xd0, 0x21, 0xd1, 0x18, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x28, 0xd5, 0x00
+ }
};
static u8 c04[] = {0x04};
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 47d8f28bfdfc..770714c34295 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -444,6 +444,8 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* next first packet, wake up the application and advance
* in the queue */
if (packet_type == LAST_PACKET) {
+ if (gspca_dev->image_len > gspca_dev->pixfmt.sizeimage)
+ gspca_dev->image_len = gspca_dev->pixfmt.sizeimage;
spin_lock_irqsave(&gspca_dev->qlock, flags);
list_del(&buf->list);
spin_unlock_irqrestore(&gspca_dev->qlock, flags);
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov7660.h b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
index d60247e10c2c..6146e8ef17c0 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
@@ -86,7 +86,6 @@ extern bool dump_sensor;
int ov7660_probe(struct sd *sd);
int ov7660_init(struct sd *sd);
-int ov7660_init(struct sd *sd);
int ov7660_init_controls(struct sd *sd);
int ov7660_start(struct sd *sd);
int ov7660_stop(struct sd *sd);
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index bfd194c61819..da916127a896 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL");
#define HAS_NO_BUTTON 0x1
#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
#define FLIP_DETECT 0x4
+#define HAS_LED_TORCH 0x8
/* specific webcam descriptor */
struct sd {
@@ -77,6 +78,8 @@ struct sd {
};
struct v4l2_ctrl *jpegqual;
+ struct v4l2_ctrl *led_mode;
+
struct work_struct work;
u32 pktsz; /* (used by pkt_scan) */
@@ -1533,6 +1536,12 @@ static void set_gain(struct gspca_dev *gspca_dev, s32 g)
i2c_w(gspca_dev, gain);
}
+static void set_led_mode(struct gspca_dev *gspca_dev, s32 val)
+{
+ reg_w1(gspca_dev, 0x1007, 0x60);
+ reg_w1(gspca_dev, 0x1006, val ? 0x40 : 0x00);
+}
+
static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1699,6 +1708,9 @@ static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
set_quality(gspca_dev, ctrl->val);
break;
+ case V4L2_CID_FLASH_LED_MODE:
+ set_led_mode(gspca_dev, ctrl->val);
+ break;
}
return gspca_dev->usb_err;
}
@@ -1757,6 +1769,12 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+
+ if (sd->flags & HAS_LED_TORCH)
+ sd->led_mode = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_TORCH,
+ ~0x5, V4L2_FLASH_LED_MODE_NONE);
+
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
@@ -2048,6 +2066,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->pktsz = sd->npkt = 0;
sd->nchg = 0;
}
+ if (sd->led_mode)
+ v4l2_ctrl_s_ctrl(sd->led_mode, 0);
return gspca_dev->usb_err;
}
@@ -2325,7 +2345,7 @@ static const struct sd_desc sd_desc = {
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
- {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, HAS_LED_TORCH)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)},
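
v4l2_ctrl_new_std_menu() takes a skip mask whose set bits hide menu entries; ~0x5 leaves only bits 0 and 2 valid, i.e. V4L2_FLASH_LED_MODE_NONE and V4L2_FLASH_LED_MODE_TORCH, so the FLASH mode is never offered. A hedged sketch of registering such a menu control in a generic driver (the ops symbol is hypothetical):

#include <media/v4l2-ctrls.h>

extern const struct v4l2_ctrl_ops mydrv_ctrl_ops;	/* hypothetical s_ctrl ops */

static struct v4l2_ctrl *mydrv_add_torch_ctrl(struct v4l2_ctrl_handler *hdl)
{
	/*
	 * max  = V4L2_FLASH_LED_MODE_TORCH (2)
	 * mask = ~0x5: every bit except 0 and 2 is skipped, so only
	 *        MODE_NONE (0) and MODE_TORCH (2) appear in the menu.
	 * def  = V4L2_FLASH_LED_MODE_NONE
	 */
	return v4l2_ctrl_new_std_menu(hdl, &mydrv_ctrl_ops,
				      V4L2_CID_FLASH_LED_MODE,
				      V4L2_FLASH_LED_MODE_TORCH,
				      ~0x5, V4L2_FLASH_LED_MODE_NONE);
}
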
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
index 9f71d8c2a3c6..8ae3ad80cccb 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
@@ -355,11 +355,8 @@ static int parse_token(const char *ptr,unsigned int len,
int *valptr,
const char * const *names, unsigned int namecnt)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- int negfl;
- char *p2;
*valptr = 0;
if (!names) namecnt = 0;
for (idx = 0; idx < namecnt; idx++) {
@@ -370,18 +367,7 @@ static int parse_token(const char *ptr,unsigned int len,
*valptr = idx;
return 0;
}
- negfl = 0;
- if ((*ptr == '-') || (*ptr == '+')) {
- negfl = (*ptr == '-');
- ptr++; len--;
- }
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (negfl) *valptr = -(*valptr);
- if (*p2) return -EINVAL;
- return 1;
+ return kstrtoint(ptr, 0, valptr) ? -EINVAL : 1;
}
@@ -389,10 +375,8 @@ static int parse_mtoken(const char *ptr,unsigned int len,
int *valptr,
const char **names,int valid_bits)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- char *p2;
int msk;
*valptr = 0;
for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
@@ -405,12 +389,7 @@ static int parse_mtoken(const char *ptr,unsigned int len,
*valptr = msk;
return 0;
}
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (*p2) return -EINVAL;
- return 0;
+ return kstrtoint(ptr, 0, valptr);
}
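
Both token parsers now delegate numeric conversion to kstrtoint(), which handles the optional leading sign and rejects trailing garbage itself, so the temporary buffer and manual sign handling go away. A small kernel-context illustration of the resulting shape (hypothetical wrapper, not the pvrusb2 code):

#include <linux/kernel.h>

/* Sketch: parse an integer the way the reworked token parsers do. */
static int parse_int(const char *s, int *valptr)
{
	/*
	 * Base 0 auto-detects decimal/octal/hex; kstrtoint() returns a
	 * negative errno on overflow or on any non-numeric trailing
	 * characters, which is mapped to -EINVAL here.
	 */
	return kstrtoint(s, 0, valptr) ? -EINVAL : 0;
}
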
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 9657c1883311..c04ab7258d64 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -640,10 +640,6 @@ static int pvr2_s_ext_ctrls(struct file *file, void *priv,
unsigned int idx;
int ret;
- /* Default value cannot be changed */
- if (ctls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
-
ret = 0;
for (idx = 0; idx < ctls->count; idx++) {
ctrl = ctls->controls + idx;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 0e231e576dc3..9f445e6ab5fa 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1234,6 +1234,11 @@ static void stk_v4l_dev_release(struct video_device *vd)
if (dev->sio_bufs != NULL || dev->isobufs != NULL)
pr_err("We are leaking memory\n");
usb_put_intf(dev->interface);
+ usb_put_dev(dev->udev);
+
+ v4l2_ctrl_handler_free(&dev->hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
}
static const struct video_device stk_v4l_data = {
@@ -1309,7 +1314,7 @@ static int stk_camera_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->wait_frame);
dev->first_init = 1; /* webcam LED management */
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->interface = interface;
usb_get_intf(interface);
@@ -1365,6 +1370,7 @@ static int stk_camera_probe(struct usb_interface *interface,
error_put:
usb_put_intf(interface);
+ usb_put_dev(dev->udev);
error:
v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
@@ -1385,9 +1391,6 @@ static void stk_camera_disconnect(struct usb_interface *interface)
video_device_node_name(&dev->vdev));
video_unregister_device(&dev->vdev);
- v4l2_ctrl_handler_free(&dev->hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
}
#ifdef CONFIG_PM
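
Freeing the device state from the video_device release callback, rather than from disconnect, keeps the memory valid while a file handle is still open, and taking a reference on the struct usb_device in probe makes the usb_put_dev() in release safe. A sketch of that ownership pattern with hypothetical names (not the stkwebcam code itself):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct mycam {
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler hdl;
	struct video_device vdev;
	struct usb_device *udev;
};

/* Called by the V4L2 core once the last reference to the node is gone. */
static void mycam_release(struct video_device *vd)
{
	struct mycam *cam = container_of(vd, struct mycam, vdev);

	usb_put_dev(cam->udev);		/* drop the reference taken in probe */
	v4l2_ctrl_handler_free(&cam->hdl);
	v4l2_device_unregister(&cam->v4l2_dev);
	kfree(cam);			/* only now is it safe to free */
}

/* In probe: pin the USB device for as long as the driver state exists. */
static void mycam_bind(struct mycam *cam, struct usb_device *udev)
{
	cam->udev = usb_get_dev(udev);
	cam->vdev.release = mycam_release;
}
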
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 3f650ede0c3d..e293f6f3d1bc 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -852,8 +852,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
strscpy(cap->driver, "tm6000", sizeof(cap->driver));
- strscpy(cap->card, "Trident TVMaster TM5600/6000/6010",
- sizeof(cap->card));
+ strscpy(cap->card, "Trident TM5600/6000/6010", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_DEVICE_CAPS;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index bfda46a36dc5..38822cedd93a 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -327,7 +327,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
result = mutex_lock_interruptible(&dec->usb_mutex);
if (result) {
printk("%s: Failed to lock usb mutex.\n", __func__);
- goto err;
+ goto err_free;
}
b[0] = 0xaa;
@@ -349,7 +349,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: command bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
}
result = usb_bulk_msg(dec->udev, dec->result_pipe, b,
@@ -358,7 +358,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: result bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
} else {
if (debug) {
printk(KERN_DEBUG "%s: result: %*ph\n",
@@ -371,9 +371,9 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
memcpy(cmd_result, &b[4], b[3]);
}
-err:
+err_mutex_unlock:
mutex_unlock(&dec->usb_mutex);
-
+err_free:
kfree(b);
return result;
}
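
The renamed labels make each error path unwind only what has actually been acquired, which is the standard multi-label goto idiom. A compact, self-contained sketch of the shape the function now has (abbreviated names and a hypothetical transfer helper, not the full ttusb_dec code):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

int do_usb_transfer(void *owner, u8 *buf);	/* hypothetical helper */

static int send_command_sketch(struct mutex *lock, void *owner)
{
	u8 *b;
	int result;

	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	result = mutex_lock_interruptible(lock);
	if (result)
		goto err_free;		/* nothing locked yet: free only */

	result = do_usb_transfer(owner, b);
	if (result)
		goto err_mutex_unlock;	/* locked: unlock, then free */

err_mutex_unlock:
	mutex_unlock(lock);
err_free:
	kfree(b);
	return result;
}
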
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index b3dde98499f4..30bfe9069a1f 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -357,6 +357,11 @@ static const struct uvc_control_info uvc_ctrls[] = {
},
};
+static const u32 uvc_control_classes[] = {
+ V4L2_CID_CAMERA_CLASS,
+ V4L2_CID_USER_CLASS,
+};
+
static const struct uvc_menu_info power_line_frequency_controls[] = {
{ 0, "Disabled" },
{ 1, "50 Hz" },
@@ -427,7 +432,6 @@ static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BRIGHTNESS_CONTROL,
.size = 16,
@@ -437,7 +441,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_CONTRAST,
- .name = "Contrast",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_CONTRAST_CONTROL,
.size = 16,
@@ -447,7 +450,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE,
- .name = "Hue",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_CONTROL,
.size = 16,
@@ -459,7 +461,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SATURATION,
- .name = "Saturation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SATURATION_CONTROL,
.size = 16,
@@ -469,7 +470,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SHARPNESS,
- .name = "Sharpness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SHARPNESS_CONTROL,
.size = 16,
@@ -479,7 +479,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAMMA,
- .name = "Gamma",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAMMA_CONTROL,
.size = 16,
@@ -489,7 +488,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BACKLIGHT_COMPENSATION,
- .name = "Backlight Compensation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.size = 16,
@@ -499,7 +497,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAIN,
- .name = "Gain",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAIN_CONTROL,
.size = 16,
@@ -509,7 +506,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Power Line Frequency",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.size = 2,
@@ -521,7 +517,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE_AUTO,
- .name = "Hue, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
.size = 1,
@@ -532,7 +527,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
- .name = "Exposure, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_MODE_CONTROL,
.size = 4,
@@ -545,7 +539,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
- .name = "Exposure, Auto Priority",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_PRIORITY_CONTROL,
.size = 1,
@@ -555,7 +548,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .name = "Exposure (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.size = 32,
@@ -567,7 +559,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Temperature, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.size = 1,
@@ -578,7 +569,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
- .name = "White Balance Temperature",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.size = 16,
@@ -590,7 +580,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Component, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.size = 1,
@@ -602,7 +591,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BLUE_BALANCE,
- .name = "White Balance Blue Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -614,7 +602,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_RED_BALANCE,
- .name = "White Balance Red Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -626,7 +613,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
- .name = "Focus (absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.size = 16,
@@ -638,7 +624,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_AUTO,
- .name = "Focus, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_AUTO_CONTROL,
.size = 1,
@@ -649,7 +634,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
- .name = "Iris, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.size = 16,
@@ -659,7 +643,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_RELATIVE,
- .name = "Iris, Relative",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.size = 8,
@@ -669,7 +652,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.size = 16,
@@ -679,7 +661,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_CONTINUOUS,
- .name = "Zoom, Continuous",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.size = 0,
@@ -691,7 +672,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_ABSOLUTE,
- .name = "Pan (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -701,7 +681,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
- .name = "Tilt (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -711,7 +690,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_SPEED,
- .name = "Pan (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -723,7 +701,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_SPEED,
- .name = "Tilt (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -735,7 +712,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -745,7 +721,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_EXT_GPIO_CONTROLLER,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -1024,6 +999,85 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
return 0;
}
+static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id)
+{
+ bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ unsigned int i;
+
+ req_id &= V4L2_CTRL_ID_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (!(chain->ctrl_class_bitmap & BIT(i)))
+ continue;
+ if (!find_next) {
+ if (uvc_control_classes[i] == req_id)
+ return i;
+ continue;
+ }
+ if (uvc_control_classes[i] > req_id &&
+ uvc_control_classes[i] < found_id)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id, struct v4l2_queryctrl *v4l2_ctrl)
+{
+ int idx;
+
+ idx = __uvc_query_v4l2_class(chain, req_id, found_id);
+ if (idx < 0)
+ return -ENODEV;
+
+ memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ v4l2_ctrl->id = uvc_control_classes[idx];
+ strscpy(v4l2_ctrl->name, v4l2_ctrl_get_name(v4l2_ctrl->id),
+ sizeof(v4l2_ctrl->name));
+ v4l2_ctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ v4l2_ctrl->flags = V4L2_CTRL_FLAG_WRITE_ONLY
+ | V4L2_CTRL_FLAG_READ_ONLY;
+ return 0;
+}
+
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+
+ if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
+ return -EACCES;
+
+ ctrl = uvc_find_control(chain, v4l2_id, &mapping);
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read)
+ return -EACCES;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ return -EACCES;
+
+ return 0;
+}
+
+static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
+{
+ const char *name;
+
+ if (map->name)
+ return map->name;
+
+ name = v4l2_ctrl_get_name(map->id);
+ if (name)
+ return name;
+
+ return "Unknown Control";
+}
+
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
@@ -1037,7 +1091,8 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
- strscpy(v4l2_ctrl->name, mapping->name, sizeof(v4l2_ctrl->name));
+ strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
+ sizeof(v4l2_ctrl->name));
v4l2_ctrl->flags = 0;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
@@ -1127,12 +1182,31 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
if (ret < 0)
return -ERESTARTSYS;
+ /* Check if the ctrl is a know class */
+ if (!(v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL)) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, 0, v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
goto done;
}
+ /*
+ * If we're enumerating controls with V4L2_CTRL_FLAG_NEXT_CTRL, check if
+ * a class should be inserted between the previous control and the one
+ * we have just found.
+ */
+ if (v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, mapping->id,
+ v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
@@ -1426,6 +1500,11 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
if (ret < 0)
return -ERESTARTSYS;
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0) {
+ ret = 0;
+ goto done;
+ }
+
ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
@@ -1459,7 +1538,10 @@ static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
mutex_lock(&handle->chain->ctrl_mutex);
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0)
+ goto done;
list_del(&sev->node);
+done:
mutex_unlock(&handle->chain->ctrl_mutex);
}
@@ -1500,7 +1582,7 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback)
+ struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1542,31 +1624,59 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
ctrl->dirty = 0;
- if (ret < 0)
+ if (ret < 0) {
+ if (err_ctrl)
+ *err_ctrl = ctrl;
return ret;
+ }
}
return 0;
}
+static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
+ struct v4l2_ext_controls *ctrls,
+ struct uvc_control *uvc_control)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl_found;
+ unsigned int i;
+
+ if (!entity)
+ return ctrls->count;
+
+ for (i = 0; i < ctrls->count; i++) {
+ __uvc_find_control(entity, ctrls->controls[i].id, &mapping,
+ &ctrl_found, 0);
+ if (uvc_control == ctrl_found)
+ return i;
+ }
+
+ return ctrls->count;
+}
+
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
struct uvc_video_chain *chain = handle->chain;
+ struct uvc_control *err_ctrl;
struct uvc_entity *entity;
int ret = 0;
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
+ ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
+ &err_ctrl);
if (ret < 0)
goto done;
}
if (!rollback)
- uvc_ctrl_send_events(handle, xctrls, xctrls_count);
+ uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
+ if (ret < 0 && ctrls)
+ ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ err_ctrl);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1577,6 +1687,9 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -1596,6 +1709,9 @@ int uvc_ctrl_set(struct uvc_fh *handle,
s32 max;
int ret;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -2011,14 +2127,14 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
- dev_info(&dev->udev->dev,
- "restoring control %pUl/%u/%u\n",
- ctrl->info.entity, ctrl->info.index,
- ctrl->info.selector);
+ dev_dbg(&dev->udev->dev,
+ "restoring control %pUl/%u/%u\n",
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0);
+ ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2057,11 +2173,12 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
/*
* Add a control mapping to a given control.
*/
-static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
+static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
struct uvc_control_mapping *map;
unsigned int size;
+ unsigned int i;
/* Most mappings come from static kernel data and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
@@ -2085,9 +2202,18 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map->set == NULL)
map->set = uvc_set_le_value;
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (V4L2_CTRL_ID2WHICH(uvc_control_classes[i]) ==
+ V4L2_CTRL_ID2WHICH(map->id)) {
+ chain->ctrl_class_bitmap |= BIT(i);
+ break;
+ }
+ }
+
list_add_tail(&map->list, &ctrl->info.mappings);
- uvc_dbg(dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
- map->name, ctrl->info.entity, ctrl->info.selector);
+ uvc_dbg(chain->dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
+ uvc_map_get_name(map), ctrl->info.entity,
+ ctrl->info.selector);
return 0;
}
@@ -2105,7 +2231,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x is invalid\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
return -EINVAL;
}
@@ -2152,7 +2278,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id == map->id) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x already exists\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
ret = -EEXIST;
goto done;
}
@@ -2163,12 +2289,12 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
atomic_dec(&dev->nmappings);
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', maximum mappings count (%u) exceeded\n",
- mapping->name, UVC_MAX_CONTROL_MAPPINGS);
+ uvc_map_get_name(mapping), UVC_MAX_CONTROL_MAPPINGS);
ret = -ENOMEM;
goto done;
}
- ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ ret = __uvc_ctrl_add_mapping(chain, ctrl, mapping);
if (ret < 0)
atomic_dec(&dev->nmappings);
@@ -2244,7 +2370,8 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
* Add control information and hardcoded stock control mappings to the given
* device.
*/
-static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl)
{
const struct uvc_control_info *info = uvc_ctrls;
const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
@@ -2263,14 +2390,14 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; info < iend; ++info) {
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
- uvc_ctrl_add_info(dev, ctrl, info);
+ uvc_ctrl_add_info(chain->dev, ctrl, info);
/*
* Retrieve control flags from the device. Ignore errors
* and work with default flag values from the uvc_ctrl
* array when the device doesn't properly implement
* GET_INFO on standard controls.
*/
- uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+ uvc_ctrl_get_flags(chain->dev, ctrl, &ctrl->info);
break;
}
}
@@ -2281,22 +2408,20 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
- __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
}
}
/*
* Initialize device controls.
*/
-int uvc_ctrl_init_device(struct uvc_device *dev)
+static int uvc_ctrl_init_chain(struct uvc_video_chain *chain)
{
struct uvc_entity *entity;
unsigned int i;
- INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
-
/* Walk the entities list and instantiate controls */
- list_for_each_entry(entity, &dev->entities, list) {
+ list_for_each_entry(entity, &chain->entities, chain) {
struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols;
u8 *bmControls = NULL;
@@ -2316,7 +2441,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
}
/* Remove bogus/blacklisted controls */
- uvc_ctrl_prune_entity(dev, entity);
+ uvc_ctrl_prune_entity(chain->dev, entity);
/* Count supported controls and allocate the controls array */
ncontrols = memweight(bmControls, bControlSize);
@@ -2338,7 +2463,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
ctrl->entity = entity;
ctrl->index = i;
- uvc_ctrl_init_ctrl(dev, ctrl);
+ uvc_ctrl_init_ctrl(chain, ctrl);
ctrl++;
}
}
@@ -2346,6 +2471,22 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+int uvc_ctrl_init_device(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ int ret;
+
+ INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
+
+ list_for_each_entry(chain, &dev->chains, list) {
+ ret = uvc_ctrl_init_chain(chain);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Cleanup device controls.
*/
@@ -2357,6 +2498,7 @@ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
kfree(mapping->menu_info);
+ kfree(mapping->name);
kfree(mapping);
}
}
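
With the synthesized control classes, applications that walk the control list with V4L2_CTRL_FLAG_NEXT_CTRL now see V4L2_CID_CAMERA_CLASS and V4L2_CID_USER_CLASS entries interleaved at the expected positions, matching drivers that use the in-kernel control framework. A self-contained user-space sketch of that enumeration loop (the device path is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_queryctrl qc;
	int fd = open("/dev/video0", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		if (qc.type == V4L2_CTRL_TYPE_CTRL_CLASS)
			printf("-- class: %s --\n", qc.name);
		else
			printf("   ctrl : %s\n", qc.name);
		/* ask for the next control after the one just returned */
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}
	return 0;
}
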
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9a791d8ef200..7c007426e082 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -16,7 +16,6 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <linux/version.h>
#include <asm/unaligned.h>
#include <media/v4l2-common.h>
@@ -2194,6 +2193,7 @@ int uvc_register_video_device(struct uvc_device *dev,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
+ const char *name;
int ret;
/* Initialize the video buffers queue. */
@@ -2222,16 +2222,20 @@ int uvc_register_video_device(struct uvc_device *dev,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
default:
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Video Capture";
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ name = "Video Output";
break;
case V4L2_BUF_TYPE_META_CAPTURE:
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Metadata";
break;
}
- strscpy(vdev->name, dev->name, sizeof(vdev->name));
+ snprintf(vdev->name, sizeof(vdev->name), "%s %u", name,
+ stream->header.bTerminalLink);
/*
* Set the driver data before calling video_register_device, otherwise
@@ -2455,14 +2459,14 @@ static int uvc_probe(struct usb_interface *intf,
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
- /* Initialize controls. */
- if (uvc_ctrl_init_device(dev) < 0)
- goto error;
-
/* Scan the device for video chains. */
if (uvc_scan_device(dev) < 0)
goto error;
+ /* Initialize controls. */
+ if (uvc_ctrl_init_device(dev) < 0)
+ goto error;
+
/* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index b6279ad7ac84..82de7781f5b6 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -30,7 +30,7 @@ static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
struct uvc_video_chain *chain = stream->chain;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vfh->vdev->name, sizeof(cap->card));
+ strscpy(cap->card, stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 6acb8013de08..f4e4aff8ddf7 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -40,7 +40,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
return -ENOMEM;
map->id = xmap->id;
- memcpy(map->name, xmap->name, sizeof(map->name));
+ /* Non-standard control ID. */
+ if (v4l2_ctrl_get_name(map->id) == NULL) {
+ map->name = kmemdup(xmap->name, sizeof(xmap->name),
+ GFP_KERNEL);
+ if (!map->name)
+ return -ENOMEM;
+ }
memcpy(map->entity, xmap->entity, sizeof(map->entity));
map->selector = xmap->selector;
map->size = xmap->size;
@@ -472,10 +478,13 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
uvc_simplify_fraction(&timeperframe.numerator,
&timeperframe.denominator, 8, 333);
- if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
parm->parm.capture.timeperframe = timeperframe;
- else
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
parm->parm.output.timeperframe = timeperframe;
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ }
return 0;
}
@@ -614,13 +623,12 @@ static int uvc_v4l2_release(struct file *file)
static int uvc_ioctl_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct uvc_fh *handle = file->private_data;
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vdev->name, sizeof(cap->card));
+ strscpy(cap->card, handle->stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
@@ -995,58 +1003,24 @@ static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
+static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
+ struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl)
{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
-
- ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(handle);
- if (ret < 0)
- return ret;
-
- ctrl->value = xctrl.value;
- return 0;
-}
-
-static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
-{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
- xctrl.value = ctrl->value;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+ struct v4l2_ext_control *ctrl = ctrls->controls;
+ unsigned int i;
+ int ret = 0;
- ret = uvc_ctrl_set(handle, &xctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- return ret;
+ for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+ ret = uvc_ctrl_is_accessible(chain, ctrl->id,
+ ioctl == VIDIOC_G_EXT_CTRLS);
+ if (ret)
+ break;
}
- ret = uvc_ctrl_commit(handle, &xctrl, 1);
- if (ret < 0)
- return ret;
+ ctrls->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i : ctrls->count;
- ctrl->value = xctrl.value;
- return 0;
+ return ret;
}
static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
@@ -1058,6 +1032,10 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
unsigned int i;
int ret;
+ ret = uvc_ctrl_check_access(chain, ctrls, VIDIOC_G_EXT_CTRLS);
+ if (ret < 0)
+ return ret;
+
if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
struct v4l2_queryctrl qc = { .id = ctrl->id };
@@ -1094,16 +1072,16 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
struct v4l2_ext_controls *ctrls,
- bool commit)
+ unsigned long ioctl)
{
struct v4l2_ext_control *ctrl = ctrls->controls;
struct uvc_video_chain *chain = handle->chain;
unsigned int i;
int ret;
- /* Default value cannot be changed */
- if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
+ ret = uvc_ctrl_check_access(chain, ctrls, ioctl);
+ if (ret < 0)
+ return ret;
ret = uvc_ctrl_begin(chain);
if (ret < 0)
@@ -1113,15 +1091,16 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
ret = uvc_ctrl_set(handle, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = commit ? ctrls->count : i;
+ ctrls->error_idx = ioctl == VIDIOC_S_EXT_CTRLS ?
+ ctrls->count : i;
return ret;
}
}
ctrls->error_idx = 0;
- if (commit)
- return uvc_ctrl_commit(handle, ctrls->controls, ctrls->count);
+ if (ioctl == VIDIOC_S_EXT_CTRLS)
+ return uvc_ctrl_commit(handle, ctrls);
else
return uvc_ctrl_rollback(handle);
}
@@ -1131,7 +1110,7 @@ static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, true);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_S_EXT_CTRLS);
}
static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
@@ -1139,7 +1118,7 @@ static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, false);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_TRY_EXT_CTRLS);
}
static int uvc_ioctl_querymenu(struct file *file, void *fh,
@@ -1538,8 +1517,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_s_input = uvc_ioctl_s_input,
.vidioc_queryctrl = uvc_ioctl_queryctrl,
.vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
- .vidioc_g_ctrl = uvc_ioctl_g_ctrl,
- .vidioc_s_ctrl = uvc_ioctl_s_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index e16464606b14..9f37eaf28ce7 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -115,6 +115,11 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
case 5: /* Invalid unit */
case 6: /* Invalid control */
case 7: /* Invalid Request */
+ /*
+ * The firmware has not properly implemented
+ * the control or there has been a HW error.
+ */
+ return -EIO;
case 8: /* Invalid value within range */
return -EINVAL;
default: /* reserved or unknown */
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index cce5e38133cd..2e5366143b81 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -241,7 +241,7 @@ struct uvc_control_mapping {
struct list_head ev_subs;
u32 id;
- u8 name[32];
+ char *name;
u8 entity[16];
u8 selector;
@@ -476,6 +476,7 @@ struct uvc_video_chain {
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
+ u8 ctrl_class_bitmap; /* Bitmap of valid classes */
};
struct uvc_stats_frame {
@@ -523,7 +524,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METADATA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 10240
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
@@ -885,21 +886,21 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
int uvc_ctrl_begin(struct uvc_video_chain *chain);
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count);
+ struct v4l2_ext_controls *ctrls);
static inline int uvc_ctrl_commit(struct uvc_fh *handle,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
- return __uvc_ctrl_commit(handle, 0, xctrls, xctrls_count);
+ return __uvc_ctrl_commit(handle, 0, ctrls);
}
static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
{
- return __uvc_ctrl_commit(handle, 1, NULL, 0);
+ return __uvc_ctrl_commit(handle, 1, NULL);
}
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
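The uvcvideo.h hunks above change uvc_ctrl_commit() to take the whole v4l2_ext_controls structure and add uvc_ctrl_is_accessible(). The following is an illustrative sketch, not part of the patch, of how these helpers combine in an S_EXT_CTRLS-style path; example_set_ext_ctrls() is a hypothetical name and error handling is trimmed.

/*
 * Illustrative sketch (not part of the patch): access check, then
 * begin/set/commit with the new uvc_ctrl_commit() signature.
 */
static int example_set_ext_ctrls(struct uvc_fh *handle,
				 struct v4l2_ext_controls *ctrls)
{
	struct v4l2_ext_control *ctrl = ctrls->controls;
	unsigned int i;
	int ret;

	/* read = false: this is a write path, reject read-only controls. */
	for (i = 0; i < ctrls->count; ++ctrl, ++i) {
		ret = uvc_ctrl_is_accessible(handle->chain, ctrl->id, false);
		if (ret)
			return ret;
	}

	ret = uvc_ctrl_begin(handle->chain);
	if (ret < 0)
		return ret;

	for (i = 0, ctrl = ctrls->controls; i < ctrls->count; ++ctrl, ++i) {
		ret = uvc_ctrl_set(handle, ctrl);
		if (ret < 0) {
			uvc_ctrl_rollback(handle);
			return ret;
		}
	}

	/* Commit now receives the full v4l2_ext_controls structure. */
	return uvc_ctrl_commit(handle, ctrls);
}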
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index cd9e78c63791..0404267f1ae4 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -24,9 +24,9 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->bound)
return 0;
@@ -34,9 +34,9 @@ static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
return n->ops->bound(n, subdev, asd);
}
-static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->unbind)
return;
@@ -44,7 +44,7 @@ static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
n->ops->unbind(n, subdev, asd);
}
-static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
if (!n->ops || !n->ops->complete)
return 0;
@@ -215,7 +215,7 @@ v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
-v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
while (notifier->parent)
notifier = notifier->parent;
@@ -227,7 +227,7 @@ v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
* Return true if all child sub-device notifiers are complete, false otherwise.
*/
static bool
-v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd;
@@ -239,7 +239,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier &&
- !v4l2_async_notifier_can_complete(subdev_notifier))
+ !v4l2_async_nf_can_complete(subdev_notifier))
return false;
}
@@ -251,7 +251,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
* sub-devices have been bound; v4l2_device is also available then.
*/
static int
-v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
/* Quick check whether there are still more sub-devices here. */
if (!list_empty(&notifier->waiting))
@@ -266,14 +266,14 @@ v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
return 0;
/* Is everything ready? */
- if (!v4l2_async_notifier_can_complete(notifier))
+ if (!v4l2_async_nf_can_complete(notifier))
return 0;
- return v4l2_async_notifier_call_complete(notifier);
+ return v4l2_async_nf_call_complete(notifier);
}
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
@@ -287,7 +287,7 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
if (ret < 0)
return ret;
- ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
+ ret = v4l2_async_nf_call_bound(notifier, sd, asd);
if (ret < 0) {
v4l2_device_unregister_subdev(sd);
return ret;
@@ -315,15 +315,15 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
*/
subdev_notifier->parent = notifier;
- return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+ return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}
/* Test all async sub-devices in a notifier for a match. */
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_subdev *sd;
if (!v4l2_dev)
@@ -367,7 +367,7 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Unbind all sub-devices in the notifier tree. */
static void
-v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
@@ -376,9 +376,9 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
list_move(&sd->async_list, &subdev_list);
@@ -389,8 +389,8 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
/* See if an async sub-device can be found in a notifier's lists. */
static bool
-__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
struct v4l2_async_subdev *asd_y;
struct v4l2_subdev *sd;
@@ -416,9 +416,8 @@ __v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
* If @this_index < 0, search the notifier's entire @asd_list.
*/
static bool
-v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd, int this_index)
{
struct v4l2_async_subdev *asd_y;
int j = 0;
@@ -435,15 +434,15 @@ v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
/* Check that an asd does not exist in other notifiers. */
list_for_each_entry(notifier, &notifier_list, list)
- if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
+ if (__v4l2_async_nf_has_async_subdev(notifier, asd))
return true;
return false;
}
-static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd,
+ int this_index)
{
struct device *dev =
notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
@@ -454,8 +453,7 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
switch (asd->match_type) {
case V4L2_ASYNC_MATCH_I2C:
case V4L2_ASYNC_MATCH_FWNODE:
- if (v4l2_async_notifier_has_async_subdev(notifier, asd,
- this_index)) {
+ if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
return -EEXIST;
}
@@ -469,13 +467,13 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
return 0;
}
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
INIT_LIST_HEAD(&notifier->asd_list);
}
-EXPORT_SYMBOL(v4l2_async_notifier_init);
+EXPORT_SYMBOL(v4l2_async_nf_init);
-static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd;
int ret, i = 0;
@@ -486,18 +484,18 @@ static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
mutex_lock(&list_lock);
list_for_each_entry(asd, &notifier->asd_list, asd_list) {
- ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
if (ret)
goto err_unlock;
list_add_tail(&asd->list, &notifier->waiting);
}
- ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ ret = v4l2_async_nf_try_all_subdevs(notifier);
if (ret < 0)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret < 0)
goto err_unbind;
@@ -512,7 +510,7 @@ err_unbind:
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
err_unlock:
mutex_unlock(&list_lock);
@@ -520,8 +518,8 @@ err_unlock:
return ret;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -530,16 +528,16 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
notifier->v4l2_dev = v4l2_dev;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->v4l2_dev = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_notifier_register);
+EXPORT_SYMBOL(v4l2_async_nf_register);
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -548,21 +546,21 @@ int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
notifier->sd = sd;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->sd = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
+EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
static void
-__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
notifier->sd = NULL;
notifier->v4l2_dev = NULL;
@@ -570,17 +568,17 @@ __v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
list_del(&notifier->list);
}
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(notifier);
+ __v4l2_async_nf_unregister(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL(v4l2_async_nf_unregister);
-static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd, *tmp;
@@ -601,24 +599,24 @@ static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
}
}
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_cleanup(notifier);
+ __v4l2_async_nf_cleanup(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
-int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
int ret;
mutex_lock(&list_lock);
- ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
if (ret)
goto unlock;
@@ -628,12 +626,12 @@ unlock:
mutex_unlock(&list_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -645,7 +643,7 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
asd->match.fwnode = fwnode_handle_get(fwnode);
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
fwnode_handle_put(fwnode);
kfree(asd);
@@ -654,12 +652,12 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
struct fwnode_handle *remote;
@@ -668,21 +666,19 @@ __v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif
if (!remote)
return ERR_PTR(-ENOTCONN);
- asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
- asd_struct_size);
+ asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
/*
- * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
+ * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
* so drop the one we got in fwnode_graph_get_remote_port_parent.
*/
fwnode_handle_put(remote);
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
+ unsigned short address, unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -695,7 +691,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
asd->match.i2c.adapter_id = adapter_id;
asd->match.i2c.address = address;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
kfree(asd);
return ERR_PTR(ret);
@@ -703,7 +699,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
@@ -725,7 +721,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
list_for_each_entry(notifier, &notifier_list, list) {
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_async_subdev *asd;
if (!v4l2_dev)
@@ -739,7 +735,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
if (ret)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret)
goto err_unbind;
@@ -761,10 +757,10 @@ err_unbind:
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
if (sd->asd)
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
@@ -780,8 +776,8 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(sd->subdev_notifier);
- __v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ __v4l2_async_nf_unregister(sd->subdev_notifier);
+ __v4l2_async_nf_cleanup(sd->subdev_notifier);
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
@@ -790,7 +786,7 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
}
v4l2_async_cleanup(sd);
@@ -825,7 +821,7 @@ static void print_waiting_subdev(struct seq_file *s,
}
static const char *
-v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
if (notifier->v4l2_dev)
return notifier->v4l2_dev->name;
@@ -843,7 +839,7 @@ static int pending_subdevs_show(struct seq_file *s, void *data)
mutex_lock(&list_lock);
list_for_each_entry(notif, &notifier_list, list) {
- seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
+ seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
list_for_each_entry(asd, &notif->waiting, list)
print_waiting_subdev(s, asd);
}
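The v4l2-async.c hunks are a mechanical rename of the notifier helpers from the long "notifier" names to the shorter "nf" ones. Below is a minimal, hypothetical bridge-driver sketch of the notifier lifecycle with the renamed API; it is not part of the patch, the "my_drv" structure, ops table and function names are invented, and it assumes the usual v4l2_async_nf_add_fwnode_remote() wrapper macro that pairs with __v4l2_async_nf_add_fwnode_remote().

/*
 * Illustrative sketch (not part of the patch): init -> add -> register,
 * and the mirrored unregister -> cleanup teardown.
 */
static int my_drv_register_notifier(struct my_drv *drv,
				    struct fwnode_handle *ep)
{
	struct v4l2_async_subdev *asd;
	int ret;

	v4l2_async_nf_init(&drv->notifier);

	asd = v4l2_async_nf_add_fwnode_remote(&drv->notifier, ep,
					      struct v4l2_async_subdev);
	if (IS_ERR(asd)) {
		ret = PTR_ERR(asd);
		goto err_cleanup;
	}

	drv->notifier.ops = &my_drv_async_ops;

	ret = v4l2_async_nf_register(&drv->v4l2_dev, &drv->notifier);
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	v4l2_async_nf_cleanup(&drv->notifier);
	return ret;
}

static void my_drv_unregister_notifier(struct my_drv *drv)
{
	v4l2_async_nf_unregister(&drv->notifier);
	v4l2_async_nf_cleanup(&drv->notifier);
}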
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 04af03285a20..df34b2a283bc 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -275,6 +275,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ /* Tiled YUV formats */
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 47aff3b19742..8176769a89fa 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -126,6 +126,9 @@ struct v4l2_format32 {
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has the V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and is
+ * configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -134,7 +137,8 @@ struct v4l2_create_buffers32 {
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
static int get_v4l2_format32(struct v4l2_format *p64,
@@ -182,6 +186,8 @@ static int get_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_from_user(p64, p32,
offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
+ if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
+ return -EFAULT;
return get_v4l2_format32(&p64->format, &p32->format);
}
@@ -227,6 +233,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_to_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
put_user(p64->capabilities, &p32->capabilities) ||
+ put_user(p64->flags, &p32->flags) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return put_v4l2_format32(&p64->format, &p32->format);
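The compat hunk above carves a flags field out of the reserved array of v4l2_create_buffers32, matching the native structure; the kerneldoc notes it is ignored unless the queue reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS and uses MMAP I/O. Below is a hedged userspace sketch, not part of the patch, assuming the V4L2_MEMORY_FLAG_NON_COHERENT flag from the same interface is available in the installed uapi headers.

/*
 * Illustrative userspace sketch (not part of the patch): requesting
 * non-coherent MMAP buffers through the new flags field.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int create_noncoherent_bufs(int fd, const struct v4l2_format *fmt)
{
	struct v4l2_create_buffers create;

	memset(&create, 0, sizeof(create));
	create.count = 4;
	create.memory = V4L2_MEMORY_MMAP;
	create.format = *fmt;
	create.flags = V4L2_MEMORY_FLAG_NON_COHERENT;

	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create))
		return -1;

	/*
	 * Drivers without V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS simply
	 * ignore the flag; create.capabilities shows whether it is honoured.
	 */
	return (int)create.index;
}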
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index c4b5082849b6..70adfc1b9c81 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -687,6 +687,9 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ break;
+
case V4L2_CTRL_TYPE_AREA:
area = p;
if (!area->width || !area->height)
@@ -1240,6 +1243,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
+ break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 421300e13a41..ebe82b6ba6e6 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -997,6 +997,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
@@ -1107,6 +1108,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+ case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
/* Image processing controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1490,6 +1492,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 843259c304bb..00457e1e93f6 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -780,11 +780,11 @@ int v4l2_fwnode_device_parse(struct device *dev,
EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
static int
-v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
- struct v4l2_async_notifier *notifier,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size,
- parse_endpoint_func parse_endpoint)
+v4l2_async_nf_fwnode_parse_endpoint(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct v4l2_fwnode_endpoint vep = { .bus_type = 0 };
struct v4l2_async_subdev *asd;
@@ -822,7 +822,7 @@ v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
if (ret < 0)
goto out_err;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret < 0) {
/* not an error if asd already exists */
if (ret == -EEXIST)
@@ -839,13 +839,11 @@ out_err:
return ret == -ENOTCONN ? 0 : ret;
}
-static int
-__v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- unsigned int port,
- bool has_port,
- parse_endpoint_func parse_endpoint)
+int
+v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct fwnode_handle *fwnode;
int ret = 0;
@@ -863,22 +861,11 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
if (!is_available)
continue;
- if (has_port) {
- struct fwnode_endpoint ep;
-
- ret = fwnode_graph_parse_endpoint(fwnode, &ep);
- if (ret)
- break;
-
- if (ep.port != port)
- continue;
- }
- ret = v4l2_async_notifier_fwnode_parse_endpoint(dev,
- notifier,
- fwnode,
- asd_struct_size,
- parse_endpoint);
+ ret = v4l2_async_nf_fwnode_parse_endpoint(dev, notifier,
+ fwnode,
+ asd_struct_size,
+ parse_endpoint);
if (ret < 0)
break;
}
@@ -887,18 +874,7 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
return ret;
}
-
-int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint)
-{
- return __v4l2_async_notifier_parse_fwnode_ep(dev, notifier,
- asd_struct_size, 0,
- false, parse_endpoint);
-}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_parse_fwnode_endpoints);
/*
* v4l2_fwnode_reference_parse - parse references for async sub-devices
@@ -942,9 +918,8 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
- args.fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
@@ -1243,8 +1218,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
@@ -1260,7 +1235,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
}
/**
- * v4l2_async_notifier_parse_fwnode_sensor - parse common references on
+ * v4l2_async_nf_parse_fwnode_sensor - parse common references on
* sensors for async sub-devices
* @dev: the device node the properties of which are parsed for references
* @notifier: the async notifier where the async subdevs will be added
@@ -1269,7 +1244,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* sensor and set up async sub-devices for them.
*
* Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_release() after it has been unregistered and the async
+ * v4l2_async_nf_release() after it has been unregistered and the async
* sub-devices are no longer in use, even in the case the function returned an
* error.
*
@@ -1278,8 +1253,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* -EINVAL if property parsing failed
*/
static int
-v4l2_async_notifier_parse_fwnode_sensor(struct device *dev,
- struct v4l2_async_notifier *notifier)
+v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
+ struct v4l2_async_notifier *notifier)
{
static const char * const led_props[] = { "led" };
static const struct v4l2_fwnode_int_props props[] = {
@@ -1320,13 +1295,13 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
if (!notifier)
return -ENOMEM;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
- ret = v4l2_async_notifier_parse_fwnode_sensor(sd->dev, notifier);
+ ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
if (ret < 0)
goto out_cleanup;
- ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ ret = v4l2_async_subdev_nf_register(sd, notifier);
if (ret < 0)
goto out_cleanup;
@@ -1339,10 +1314,10 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
return 0;
out_unregister:
- v4l2_async_notifier_unregister(notifier);
+ v4l2_async_nf_unregister(notifier);
out_cleanup:
- v4l2_async_notifier_cleanup(notifier);
+ v4l2_async_nf_cleanup(notifier);
kfree(notifier);
return ret;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 05d5db3d85e5..31d0109ce5a8 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -869,7 +869,7 @@ static void v4l_print_default(const void *arg, bool write_only)
pr_cont("driver-specific ioctl\n");
}
-static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
__u32 i;
@@ -878,23 +878,41 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
- /* V4L2_CID_PRIVATE_BASE cannot be used as control class
- when using extended controls.
- Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
- is it allowed for backwards compatibility.
- */
- if (!allow_priv && c->which == V4L2_CID_PRIVATE_BASE)
- return 0;
- if (!c->which)
- return 1;
+ switch (c->which) {
+ case V4L2_CID_PRIVATE_BASE:
+ /*
+ * V4L2_CID_PRIVATE_BASE cannot be used as control class
+ * when using extended controls.
+ * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ * is it allowed for backwards compatibility.
+ */
+ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
+ return false;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ /* Default value cannot be changed */
+ if (ioctl == VIDIOC_S_EXT_CTRLS ||
+ ioctl == VIDIOC_TRY_EXT_CTRLS) {
+ c->error_idx = c->count;
+ return false;
+ }
+ return true;
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return true;
+ case V4L2_CTRL_WHICH_REQUEST_VAL:
+ c->error_idx = c->count;
+ return false;
+ }
+
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
- c->error_idx = i;
- return 0;
+ c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
+ c->count;
+ return false;
}
}
- return 1;
+ return true;
}
static int check_fmt(struct file *file, enum v4l2_buf_type type)
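The reworked check_ext_ctrls() above keys its behaviour on the calling ioctl so that error_idx follows the V4L2 convention: error_idx equal to count means the request was rejected before any control was examined, while for VIDIOC_TRY_EXT_CTRLS it points at the offending control. A short userspace sketch, not part of the patch, interpreting that:

/*
 * Illustrative userspace sketch (not part of the patch): reading
 * error_idx after a failed extended-control call.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void report_ext_ctrl_error(int fd, struct v4l2_ext_controls *ctrls)
{
	if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, ctrls) == 0)
		return;

	if (ctrls->error_idx == ctrls->count)
		fprintf(stderr, "request rejected before any control was validated\n");
	else
		fprintf(stderr, "control %u (id 0x%x) failed validation\n",
			ctrls->error_idx, ctrls->controls[ctrls->error_idx].id);
}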
@@ -1274,7 +1292,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
- case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
@@ -1282,6 +1299,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
@@ -1346,6 +1366,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
@@ -1415,7 +1436,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
if (fmt->description[0])
return;
@@ -2004,7 +2024,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, capabilities);
+ CLEAR_AFTER_FIELD(p, flags);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -2045,7 +2065,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, capabilities);
+ CLEAR_AFTER_FIELD(create, flags);
v4l_sanitize_format(&create->format);
@@ -2187,7 +2207,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1)) {
+ if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
if (ret == 0)
@@ -2206,6 +2226,7 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
@@ -2221,9 +2242,11 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1))
- return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
- return -EINVAL;
+ if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
+ return -EINVAL;
+ ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ p->value = ctrl.value;
+ return ret;
}
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2243,8 +2266,8 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
+ ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2264,8 +2287,8 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
+ ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2285,8 +2308,8 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
+ ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
}
/*
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index acf36676e388..0cda6c6baefc 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1736,7 +1736,7 @@ static int msb_init_card(struct memstick_dev *card)
msb->pages_in_block = boot_block->attr.block_size * 2;
msb->block_size = msb->page_size * msb->pages_in_block;
- if (msb->page_size > PAGE_SIZE) {
+ if ((size_t)msb->page_size > PAGE_SIZE) {
/* this isn't supported by linux at all, anyway*/
dbg("device page %d size isn't supported", msb->page_size);
return -EINVAL;
@@ -2156,10 +2156,14 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
dbg("Disk added");
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 22778d0e24f5..c0450397b673 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1239,10 +1239,14 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
msb->active = 1;
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f9a93b0565e1..21cb2a786058 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -882,7 +882,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
iounmap(host->addr);
err_out_free:
- kfree(msh);
+ memstick_free_host(msh);
return NULL;
}
@@ -927,8 +927,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
goto err_out_int;
}
- jm = kzalloc(sizeof(struct jmb38x_ms)
- + cnt * sizeof(struct memstick_host *), GFP_KERNEL);
+ jm = kzalloc(struct_size(jm, hosts, cnt), GFP_KERNEL);
if (!jm) {
rc = -ENOMEM;
goto err_out_int;
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index e79a0218c492..1d35d147552d 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -838,15 +838,15 @@ static void r592_remove(struct pci_dev *pdev)
}
memstick_remove_host(dev->host);
+ if (dev->dummy_dma_page)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
free_irq(dev->irq, dev);
iounmap(dev->mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
memstick_free_host(dev->host);
-
- if (dev->dummy_dma_page)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
- dev->dummy_dma_page_physical_address);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 431af5e8be2f..90e1bcd03b46 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -258,7 +258,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
mq = &md->queue;
/* Dispatch locking to the block layer */
- req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
@@ -266,7 +266,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -646,7 +646,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl() into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -660,7 +660,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_done:
kfree(idata->buf);
@@ -716,7 +716,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl()s into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -733,7 +733,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
for (i = 0; i < num_of_cmds && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_err:
for (i = 0; i < num_of_cmds; i++) {
@@ -2442,9 +2442,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ if (ret)
+ goto err_cleanup_queue;
return md;
+ err_cleanup_queue:
+ blk_cleanup_queue(md->disk->queue);
+ blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
out:
@@ -2730,7 +2735,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
/* Ask the block layer about the card status */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
@@ -2740,7 +2745,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
*val = ret;
ret = 0;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -2766,7 +2771,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
@@ -2775,7 +2780,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
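The block.c hunks above replace the legacy blk_get_request()/blk_put_request() wrappers with blk_mq_alloc_request()/blk_mq_free_request(); the driver-op dispatch pattern itself is unchanged. A minimal sketch of that pattern, not part of the patch, with a hypothetical helper name and modelled on mmc_dbg_card_status_get() above:

/*
 * Illustrative sketch (not part of the patch): allocate a driver-private
 * request, let the queue execute it, read back the op result, free it.
 */
static int example_get_card_status(struct mmc_queue *mq, u32 *status)
{
	struct request *req;
	int ret;

	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	blk_execute_rq(NULL, req, 0);

	ret = req_to_mmc_queue_req(req)->drv_op_result;
	if (ret >= 0) {
		*status = ret;
		ret = 0;
	}

	blk_mq_free_request(req);
	return ret;
}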
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c
index 67557808cada..fec4fbf16a5b 100644
--- a/drivers/mmc/core/crypto.c
+++ b/drivers/mmc/core/crypto.c
@@ -16,13 +16,13 @@ void mmc_crypto_set_initial_state(struct mmc_host *host)
{
/* Reset might clear all keys, so reprogram all the keys. */
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_reprogram_all_keys(&host->ksm);
+ blk_crypto_reprogram_all_keys(&host->crypto_profile);
}
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
{
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_register(&host->ksm, q);
+ blk_crypto_register(&host->crypto_profile, q);
}
EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
@@ -30,12 +30,15 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct blk_crypto_keyslot *keyslot;
if (!req->crypt_ctx)
return;
mrq->crypto_ctx = req->crypt_ctx;
- if (req->crypt_keyslot)
- mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
+
+ keyslot = req->crypt_keyslot;
+ if (keyslot)
+ mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 29e58ffae379..b1c1716dacf0 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1224,6 +1224,14 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (host->ops->execute_hs400_tuning) {
+ mmc_retune_disable(host);
+ err = host->ops->execute_hs400_tuning(host, card);
+ mmc_retune_enable(host);
+ if (err)
+ goto out_err;
+ }
+
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index ae25ffc2e870..e5e94567a9a9 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -38,7 +38,6 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4646b7a03db6..c9db24e16af1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 05e907451df9..dd2a4b6ab6ad 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -39,24 +39,24 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
int mmc_gpio_alloc(struct mmc_host *host)
{
- struct mmc_gpio *ctx = devm_kzalloc(host->parent,
- sizeof(*ctx), GFP_KERNEL);
-
- if (ctx) {
- ctx->cd_debounce_delay_ms = 200;
- ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s cd", dev_name(host->parent));
- if (!ctx->cd_label)
- return -ENOMEM;
- ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s ro", dev_name(host->parent));
- if (!ctx->ro_label)
- return -ENOMEM;
- host->slot.handler_priv = ctx;
- host->slot.cd_irq = -EINVAL;
- }
+ const char *devname = dev_name(host->parent);
+ struct mmc_gpio *ctx;
+
+ ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cd_debounce_delay_ms = 200;
+ ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname);
+ if (!ctx->cd_label)
+ return -ENOMEM;
+ ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname);
+ if (!ctx->ro_label)
+ return -ENOMEM;
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
- return ctx ? 0 : -ENOMEM;
+ return 0;
}
int mmc_gpio_get_ro(struct mmc_host *host)
@@ -178,6 +178,10 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->cd_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
@@ -226,6 +230,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->ro_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 95b3511b0560..5af8494c31b5 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -315,15 +315,17 @@ config MMC_SDHCI_TEGRA
If unsure, say N.
config MMC_SDHCI_S3C
- tristate "SDHCI support on Samsung S3C SoC"
+ tristate "SDHCI support on Samsung S3C/S5P/Exynos SoC"
depends on MMC_SDHCI
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referred to as the HSMMC block in some of the Samsung S3C
- range of SoC.
+ (S3C2416, S3C2443, S3C6410), S5Pv210 and Exynos (Exynos4210,
+ Exynos4412) SoCs.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface (therefore you build for
+ such a Samsung SoC), say Y or M here.
If unsure, say N.
@@ -506,7 +508,7 @@ config MMC_OMAP_HS
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
- depends on ISA_DMA_API
+ depends on ISA_DMA_API && !M68K
help
This selects the Winbond(R) W83L51xD Secure digital and
Multimedia card Interface.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 14004cc09aaa..ea36d379bd3c 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
sdhci-pci-dwc-mshc.o sdhci-pci-gli.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 38559a956330..b0d30c35c390 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -282,6 +282,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+
mmc->cqe_on = true;
if (cq_host->ops->enable)
@@ -899,8 +902,8 @@ static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
spin_unlock_irqrestore(&cq_host->lock, flags);
if (timed_out) {
- pr_err("%s: cqhci: timeout for tag %d\n",
- mmc_hostname(mmc), tag);
+ pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
+ mmc_hostname(mmc), tag, cq_host->qcnt);
cqhci_dumpregs(cq_host);
}
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index 6419cfbb4ab7..d5f4b6972f63 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -6,7 +6,7 @@
*/
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <linux/mmc/host.h>
#include "cqhci-crypto.h"
@@ -23,9 +23,10 @@ static const struct cqhci_crypto_alg_entry {
};
static inline struct cqhci_host *
-cqhci_host_from_ksm(struct blk_keyslot_manager *ksm)
+cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm);
+ struct mmc_host *mmc =
+ container_of(profile, struct mmc_host, crypto_profile);
return mmc->cqe_private;
}
@@ -57,12 +58,12 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
return 0;
}
-static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
const union cqhci_crypto_cap_entry *ccap_array =
cq_host->crypto_cap_array;
const struct cqhci_crypto_alg_entry *alg =
@@ -115,11 +116,11 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
return cqhci_crypto_program_key(cq_host, &cfg, slot);
}
-static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
return cqhci_crypto_clear_keyslot(cq_host, slot);
}
@@ -132,7 +133,7 @@ static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
* "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
* CQHCI_CFG register. But the hardware allows that.
*/
-static const struct blk_ksm_ll_ops cqhci_ksm_ops = {
+static const struct blk_crypto_ll_ops cqhci_crypto_ops = {
.keyslot_program = cqhci_crypto_keyslot_program,
.keyslot_evict = cqhci_crypto_keyslot_evict,
};
@@ -157,8 +158,8 @@ cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
*
* If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
* CQHCI_CAP_CS, initialize the crypto support. This involves reading the
- * crypto capability registers, initializing the keyslot manager, clearing all
- * keyslots, and enabling 128-bit task descriptors.
+ * crypto capability registers, initializing the blk_crypto_profile, clearing
+ * all keyslots, and enabling 128-bit task descriptors.
*
* Return: 0 if crypto was initialized or isn't supported; whether
* MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
@@ -168,7 +169,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
- struct blk_keyslot_manager *ksm = &mmc->ksm;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
unsigned int num_keyslots;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
@@ -199,15 +200,15 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
*/
num_keyslots = cq_host->crypto_capabilities.config_count + 1;
- err = devm_blk_ksm_init(dev, ksm, num_keyslots);
+ err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
if (err)
goto out;
- ksm->ksm_ll_ops = cqhci_ksm_ops;
- ksm->dev = dev;
+ profile->ll_ops = cqhci_crypto_ops;
+ profile->dev = dev;
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
- ksm->max_dun_bytes_supported = 4;
+ profile->max_dun_bytes_supported = 4;
/*
* Cache all the crypto capabilities and advertise the supported crypto
@@ -223,7 +224,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx]);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
- ksm->crypto_modes_supported[blk_mode_num] |=
+ profile->modes_supported[blk_mode_num] |=
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0c75810812a0..c2dd29ef45c6 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -442,14 +442,14 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
return sample;
}
-static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
{
const u8 iter = 8;
u8 __c;
s8 i, loc = -1;
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0xc7) == 0xc7) {
loc = i;
goto out;
@@ -457,13 +457,25 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
}
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0x83) == 0x83) {
loc = i;
goto out;
}
}
+ /*
+ * If there is no cadiates value, then it needs to return -EIO.
+ * If there are candidates values and don't find bset clk sample value,
+ * then use a first candidates clock sample value.
+ */
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x1) == 0x1) {
+ loc = i;
+ goto out;
+ }
+ }
out:
return loc;
}
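
The window search above prefers a sample flanked by passing neighbours and only falls back to narrower windows when that fails. Purely as an illustration (not part of the patch), a stand-alone C sketch of the same three-pass selection, with a local ror8() helper standing in for the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Rotate an 8-bit value right by n bits, mimicking the kernel's ror8(). */
static uint8_t ror8(uint8_t v, unsigned int n)
{
	n &= 7;
	return n ? (uint8_t)((v >> n) | (v << (8 - n))) : v;
}

/*
 * Mirrors dw_mci_exynos_get_best_clksmpl(): prefer a candidate surrounded by
 * a 5-bit window of passing samples (mask 0xc7 after rotation), then a 3-bit
 * window (0x83), then any passing sample (0x01). Returns -1 if nothing passed.
 */
static int best_clksmpl(uint8_t candidates)
{
	static const uint8_t masks[] = { 0xc7, 0x83, 0x01 };

	for (unsigned int m = 0; m < 3; m++)
		for (unsigned int i = 0; i < 8; i++)
			if ((ror8(candidates, i) & masks[m]) == masks[m])
				return (int)i;
	return -1;
}

int main(void)
{
	printf("%d\n", best_clksmpl(0x3e));	/* prints 3, the centre of the 0x3e window */
	return 0;
}
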
@@ -473,7 +485,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
- u8 start_smpl, smpl, candiates = 0;
+ u8 start_smpl, smpl, candidates = 0;
s8 found;
int ret = 0;
@@ -484,16 +496,18 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
smpl = dw_mci_exynos_move_next_clksmpl(host);
if (!mmc_send_tuning(mmc, opcode, NULL))
- candiates |= (1 << smpl);
+ candidates |= (1 << smpl);
} while (start_smpl != smpl);
- found = dw_mci_exynos_get_best_clksmpl(candiates);
+ found = dw_mci_exynos_get_best_clksmpl(candidates);
if (found >= 0) {
dw_mci_exynos_set_clksmpl(host, found);
priv->tuned_sample = found;
} else {
ret = -EIO;
+ dev_warn(&mmc->class_dev,
+ "No candidate clock sample value found!\n");
}
return ret;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 380f9aa56eb2..d977f34f6b55 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1611,37 +1611,32 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
usleep_range(200, 300);
}
-static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
- struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
/*
* Low power mode will stop the card clock when idle. According to the
* description of the CLKENA register we should disable low power mode
* for SDIO cards if we need SDIO interrupts to work.
*/
- if (mmc->caps & MMC_CAP_SDIO_IRQ) {
- const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
- u32 clk_en_a_old;
- u32 clk_en_a;
- clk_en_a_old = mci_readl(host, CLKENA);
-
- if (card->type == MMC_TYPE_SDIO ||
- card->type == MMC_TYPE_SD_COMBO) {
- set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old & ~clken_low_pwr;
- } else {
- clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old | clken_low_pwr;
- }
+ clk_en_a_old = mci_readl(host, CLKENA);
+ if (prepare) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
- if (clk_en_a != clk_en_a_old) {
- mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
- }
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
+ 0);
}
}
@@ -1669,6 +1664,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ dw_mci_prepare_sdio_irq(slot, enb);
__dw_mci_enable_sdio_irq(slot, enb);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
@@ -1790,7 +1786,6 @@ static const struct mmc_host_ops dw_mci_ops = {
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
- .init_card = dw_mci_init_card,
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
@@ -2086,7 +2081,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if (err != -ETIMEDOUT) {
+ if (err != -ETIMEDOUT &&
+ host->dir_status == DW_MCI_RECV_STATUS) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 3765e2f4ad98..c9cacd4d5b22 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1394,6 +1394,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ /*
+ * This will wake up mmci_irq_thread() which will issue
+ * a hardware reset of the MMCI block.
+ */
host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 6c9d38132f74..16d1c7a43d33 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -566,37 +566,37 @@ static int moxart_probe(struct platform_device *pdev)
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
ret = -ENOMEM;
- goto out;
+ goto out_mmc;
}
ret = of_address_to_resource(node, 0, &res_mmc);
if (ret) {
dev_err(dev, "of_address_to_resource failed\n");
- goto out;
+ goto out_mmc;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
dev_err(dev, "irq_of_parse_and_map failed\n");
ret = -EINVAL;
- goto out;
+ goto out_mmc;
}
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto out;
+ goto out_mmc;
}
reg_mmc = devm_ioremap_resource(dev, &res_mmc);
if (IS_ERR(reg_mmc)) {
ret = PTR_ERR(reg_mmc);
- goto out;
+ goto out_mmc;
}
ret = mmc_of_parse(mmc);
if (ret)
- goto out;
+ goto out_mmc;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -621,6 +621,14 @@ static int moxart_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto out;
}
+ if (!IS_ERR(host->dma_chan_tx)) {
+ dma_release_channel(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
+ }
+ if (!IS_ERR(host->dma_chan_rx)) {
+ dma_release_channel(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
+ }
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
} else {
@@ -675,6 +683,11 @@ static int moxart_probe(struct platform_device *pdev)
return 0;
out:
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
+out_mmc:
if (mmc)
mmc_free_host(mmc);
return ret;
@@ -687,9 +700,9 @@ static int moxart_remove(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, NULL);
- if (!IS_ERR(host->dma_chan_tx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
- if (!IS_ERR(host->dma_chan_rx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4dfc246c5f95..943940b44e83 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of_address.h>
@@ -258,6 +259,7 @@
#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
+#define PAD_DS_TUNE_DLY_SEL (0x1 << 0) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
@@ -301,6 +303,11 @@
#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
+/* EMMC50_PAD_DS_TUNE mask */
+#define PAD_DS_DLY_SEL (0x1 << 16) /* RW */
+#define PAD_DS_DLY1 (0x1f << 10) /* RW */
+#define PAD_DS_DLY3 (0x1f << 0) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -448,11 +455,13 @@ struct msdc_host {
bool vqmmc_enabled;
u32 latch_ck;
u32 hs400_ds_delay;
+ u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
+ bool hs400_tuning; /* hs400 mode online tuning */
bool internal_cd; /* Use internal card-detect logic */
bool cqhci; /* support eMMC hw cmdq */
struct msdc_save_para save_para; /* used when gate HCLK */
@@ -961,7 +970,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_command *cmd)
{
u32 resp;
@@ -997,7 +1006,7 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
*/
u32 opcode = cmd->opcode;
- u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+ u32 resp = msdc_cmd_find_resp(host, cmd);
u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
host->cmd_rsp = resp;
@@ -1043,8 +1052,8 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
return rawcmd;
}
-static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
{
bool read;
@@ -1112,8 +1121,7 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
}
}
-static void msdc_track_cmd_data(struct msdc_host *host,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
if (host->error)
dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
@@ -1134,7 +1142,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
- msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+ msdc_track_cmd_data(host, mrq->cmd);
if (mrq->data)
msdc_unprepare_data(host, mrq->data);
if (host->error)
@@ -1190,7 +1198,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
if (events & MSDC_INT_CMDTMO ||
(cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+ !host->hs400_tuning))
/*
* should not clear fifo/interrupt as the tune data
* may have alreay come when cmd19/cmd21 gets response
@@ -1287,7 +1296,8 @@ static void msdc_cmd_next(struct msdc_host *host,
if ((cmd->error &&
!(cmd->error == -EILSEQ &&
(cmd->opcode == MMC_SEND_TUNING_BLOCK ||
- cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 ||
+ host->hs400_tuning))) ||
(mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
@@ -1295,7 +1305,7 @@ static void msdc_cmd_next(struct msdc_host *host,
else if (!cmd->data)
msdc_request_done(host, mrq);
else
- msdc_start_data(host, mrq, cmd, cmd->data);
+ msdc_start_data(host, cmd, cmd->data);
}
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -2251,6 +2261,67 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
+static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct msdc_delay_phase dly1_delay;
+ u32 val, result_dly1 = 0;
+ u8 *ext_csd;
+ int i, ret;
+
+ if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ } else {
+ sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ }
+
+ host->hs400_tuning = true;
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, i);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, i);
+ ret = mmc_get_ext_csd(card, &ext_csd);
+ if (!ret)
+ result_dly1 |= (1 << i);
+ }
+ host->hs400_tuning = false;
+
+ dly1_delay = get_best_delay(host, result_dly1);
+ if (dly1_delay.maxlen == 0) {
+ dev_err(host->dev, "Failed to get DLY1 delay!\n");
+ goto fail;
+ }
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, dly1_delay.final_phase);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
+
+ if (host->top_base)
+ val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ else
+ val = readl(host->base + PAD_DS_TUNE);
+
+ dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+
+ return 0;
+
+fail:
+ dev_err(host->dev, "Failed to tuning DS pin delay!\n");
+ return -EIO;
+}
+
static void msdc_hw_reset(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
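
The new msdc_execute_hs400_tuning() above sweeps PAD_DS_TUNE_DLY1, records which delay values survive an EXT_CSD read in the result_dly1 bitmap, and hands that bitmap to get_best_delay() (not shown in this hunk). As an illustration of the bitmap step only, and assuming no wrap-around handling, a stand-alone sketch that returns the centre of the longest run of passing delays:

#include <stdint.h>
#include <stdio.h>

/* Bit i set in the bitmap means delay value i passed the tuning read. */
static int center_of_longest_run(uint32_t bitmap, int nbits)
{
	int best_start = -1, best_len = 0, start = -1, len = 0;

	for (int i = 0; i < nbits; i++) {
		if (bitmap & (1u << i)) {
			if (len == 0)
				start = i;
			len++;
			if (len > best_len) {
				best_len = len;
				best_start = start;
			}
		} else {
			len = 0;
		}
	}
	return best_len ? best_start + best_len / 2 : -1;
}

int main(void)
{
	/* delays 5..14 passed: returns 10, the middle of that window */
	printf("%d\n", center_of_longest_run(0x7fe0, 32));
	return 0;
}
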
@@ -2330,6 +2401,7 @@ static void msdc_cqe_enable(struct mmc_host *mmc)
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct msdc_host *host = mmc_priv(mmc);
+ unsigned int val = 0;
/* disable cmdq irq */
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
@@ -2339,6 +2411,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 3000)))
+ return;
msdc_reset_hw(host);
}
}
@@ -2377,6 +2452,7 @@ static const struct mmc_host_ops mt_msdc_ops = {
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
+ .execute_hs400_tuning = msdc_execute_hs400_tuning,
.hw_reset = msdc_hw_reset,
};
@@ -2396,6 +2472,9 @@ static void msdc_of_property_parse(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
+ of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
+ &host->hs400_ds_dly3);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
&host->hs200_cmd_int_delay);
@@ -2577,6 +2656,25 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
+ host->timeout_clks = 3 * 1048576;
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+ spin_lock_init(&host->lock);
+
+ platform_set_drvdata(pdev, mmc);
+ msdc_ungate_clock(host);
+ msdc_init_hw(host);
+
if (mmc->caps2 & MMC_CAP2_CQE) {
host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
@@ -2597,25 +2695,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->max_seg_size = 64 * 1024;
}
- host->timeout_clks = 3 * 1048576;
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- 2 * sizeof(struct mt_gpdma_desc),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
- INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
- spin_lock_init(&host->lock);
-
- platform_set_drvdata(pdev, mmc);
- msdc_ungate_clock(host);
- msdc_init_hw(host);
-
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
IRQF_TRIGGER_NONE, pdev->name, host);
if (ret)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 947581de7860..8c3655d3be96 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -552,6 +552,11 @@ static const struct of_device_id mxs_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+static void mxs_mmc_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -591,6 +596,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
"Failed to enable vmmc regulator: %d\n", ret);
goto out_mmc_free;
}
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
+ reg_vmmc);
+ if (ret)
+ goto out_mmc_free;
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2f8038d69f67..9dafcbf969d9 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -702,11 +702,6 @@ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
#else
-static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
-{
- return 0;
-}
-
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}
@@ -1515,7 +1510,7 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
* REVISIT: should be moved to sdio core and made more
* general e.g. by expanding the DT bindings of child nodes
* to provide a mechanism to provide this information:
- * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ * Documentation/devicetree/bindings/mmc/mmc-card.yaml
*/
np = of_get_compatible_child(np, "ti,wl1251");
@@ -2086,6 +2081,7 @@ static int omap_hsmmc_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
@@ -2153,11 +2149,11 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
}
+#endif
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
- .runtime_suspend = omap_hsmmc_runtime_suspend,
- .runtime_resume = omap_hsmmc_runtime_resume,
+ SET_RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe65f172a61..f1ef0d28b0dd 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -362,23 +362,11 @@ static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
-
- return ret;
+ return sdhci_get_cd_nogpio(mmc);
}
static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f18d169bc8ff..afaf33707d46 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -196,6 +196,9 @@
*/
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
+/* ERR004536 is not applicable for the IP */
+#define ESDHC_FLAG_SKIP_ERR004536 BIT(17)
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -289,6 +292,13 @@ static const struct esdhc_soc_data usdhc_imx7d_data = {
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
+static struct esdhc_soc_data usdhc_s32g2_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_SKIP_ERR004536,
+};
+
static struct esdhc_soc_data usdhc_imx7ulp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
@@ -347,6 +357,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
+ { .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -1187,6 +1198,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 ctrl;
+ int ret;
/* Reset the tuning circuit */
if (esdhc_is_usdhc(imx_data)) {
@@ -1199,7 +1211,22 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ /* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */
+ ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
+ ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "Warning! clear execute tuning bit failed\n");
+ /*
+ * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears the usdhc
+ * IP internal logic flag execute_tuning_with_clr_buf, which finally
+ * makes sure the normal data transfer logic is correct.
+ */
+ ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
+ ctrl |= SDHCI_INT_DATA_AVAIL;
+ writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
}
}
}
@@ -1359,8 +1386,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
- writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
- host->ioaddr + 0x6c);
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_ERR004536)) {
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
+ host->ioaddr + 0x6c);
+ }
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 737e2bfdedc2..6a2e5a468424 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -191,6 +191,13 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map thunderbay_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
+ .clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
+ .support64b = { .reg = 0x4, .width = 1, .shift = 24 },
+ .hiword_update = false,
+};
+
static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = {
.baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
.clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
@@ -456,6 +463,15 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_arasan_thunderbay_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -1132,6 +1148,12 @@ static struct sdhci_arasan_of_data sdhci_arasan_generic_data = {
.clk_ops = &arasan_clk_ops,
};
+static const struct sdhci_arasan_of_data sdhci_arasan_thunderbay_data = {
+ .soc_ctl_map = &thunderbay_soc_ctl_map,
+ .pdata = &sdhci_arasan_thunderbay_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
static const struct sdhci_pltfm_data sdhci_keembay_emmc_pdata = {
.ops = &sdhci_arasan_cqe_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -1265,6 +1287,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,keembay-sdhci-5.1-sdio",
.data = &intel_keembay_sdio_data,
},
+ {
+ .compatible = "intel,thunderbay-sdhci-5.1",
+ .data = &sdhci_arasan_thunderbay_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -1626,7 +1652,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
- of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
+ of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio") ||
+ of_device_is_compatible(np, "intel,thunderbay-sdhci-5.1")) {
sdhci_arasan_update_clockmultiplier(host, 0x0);
sdhci_arasan_update_support64b(host, 0x0);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 8f4d1f003f65..64e27c2821f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sys_soc.h>
@@ -21,7 +23,14 @@
#include "sdhci-pltfm.h"
-#define SDHCI_OMAP_CON 0x12c
+/*
+ * Note that the register offsets used here are from omap_regs
+ * base which is 0x100 for omap4 and later, and 0 for omap3 and
+ * earlier.
+ */
+#define SDHCI_OMAP_SYSCONFIG 0x10
+
+#define SDHCI_OMAP_CON 0x2c
#define CON_DW8 BIT(5)
#define CON_DMA_MASTER BIT(20)
#define CON_DDR BIT(19)
@@ -31,20 +40,20 @@
#define CON_INIT BIT(1)
#define CON_OD BIT(0)
-#define SDHCI_OMAP_DLL 0x0134
+#define SDHCI_OMAP_DLL 0x34
#define DLL_SWT BIT(20)
#define DLL_FORCE_SR_C_SHIFT 13
#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT)
#define DLL_FORCE_VALUE BIT(12)
#define DLL_CALIB BIT(1)
-#define SDHCI_OMAP_CMD 0x20c
+#define SDHCI_OMAP_CMD 0x10c
-#define SDHCI_OMAP_PSTATE 0x0224
+#define SDHCI_OMAP_PSTATE 0x124
#define PSTATE_DLEV_DAT0 BIT(20)
#define PSTATE_DATI BIT(1)
-#define SDHCI_OMAP_HCTL 0x228
+#define SDHCI_OMAP_HCTL 0x128
#define HCTL_SDBP BIT(8)
#define HCTL_SDVS_SHIFT 9
#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
@@ -52,26 +61,28 @@
#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
-#define SDHCI_OMAP_SYSCTL 0x22c
+#define SDHCI_OMAP_SYSCTL 0x12c
#define SYSCTL_CEN BIT(2)
#define SYSCTL_CLKD_SHIFT 6
#define SYSCTL_CLKD_MASK 0x3ff
-#define SDHCI_OMAP_STAT 0x230
+#define SDHCI_OMAP_STAT 0x130
-#define SDHCI_OMAP_IE 0x234
+#define SDHCI_OMAP_IE 0x134
#define INT_CC_EN BIT(0)
-#define SDHCI_OMAP_AC12 0x23c
+#define SDHCI_OMAP_ISE 0x138
+
+#define SDHCI_OMAP_AC12 0x13c
#define AC12_V1V8_SIGEN BIT(19)
#define AC12_SCLK_SEL BIT(23)
-#define SDHCI_OMAP_CAPA 0x240
+#define SDHCI_OMAP_CAPA 0x140
#define CAPA_VS33 BIT(24)
#define CAPA_VS30 BIT(25)
#define CAPA_VS18 BIT(26)
-#define SDHCI_OMAP_CAPA2 0x0244
+#define SDHCI_OMAP_CAPA2 0x144
#define CAPA2_TSDR50 BIT(13)
#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
@@ -89,7 +100,8 @@
#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
- u32 offset;
+ int omap_offset; /* Offset for omap regs from base */
+ u32 offset; /* Offset for SDHCI regs from base */
u8 flags;
};
@@ -107,12 +119,19 @@ struct sdhci_omap_host {
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
+ int wakeirq;
bool is_tuning;
+
+ /* Offset for omap specific registers from base */
+ int omap_offset;
+
/* Omap specific context save */
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
+ u32 ie;
+ u32 ise;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -121,13 +140,13 @@ static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host);
static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
unsigned int offset)
{
- return readl(host->base + offset);
+ return readl(host->base + host->omap_offset + offset);
}
static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
unsigned int offset, u32 data)
{
- writel(data, host->base + offset);
+ writel(data, host->base + host->omap_offset + offset);
}
static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
@@ -172,7 +191,7 @@ static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
}
static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
- unsigned int iov)
+ unsigned int iov_pbias)
{
int ret;
struct sdhci_host *host = omap_host->host;
@@ -183,14 +202,15 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
return ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
- if (ret) {
+ /* Pick the right voltage to allow 3.0V for 3.3V nominal PBIAS */
+ ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
+ if (ret < 0) {
dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
return ret;
}
}
- ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ ret = sdhci_omap_set_pbias(omap_host, true, iov_pbias);
if (ret)
return ret;
@@ -200,16 +220,28 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
unsigned char signal_voltage)
{
- u32 reg;
+ u32 reg, capa;
ktime_t timeout;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
reg &= ~HCTL_SDVS_MASK;
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- reg |= HCTL_SDVS_33;
- else
+ switch (signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (capa & CAPA_VS33)
+ reg |= HCTL_SDVS_33;
+ else if (capa & CAPA_VS30)
+ reg |= HCTL_SDVS_30;
+ else
+ dev_warn(omap_host->dev, "misconfigured CAPA: %08x\n",
+ capa);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
reg |= HCTL_SDVS_18;
+ break;
+ }
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
@@ -527,28 +559,32 @@ static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
- if (!(reg & CAPA_VS33))
+ if (!(reg & (CAPA_VS30 | CAPA_VS33)))
return -EOPNOTSUPP;
+ if (reg & CAPA_VS30)
+ iov = IOV_3V0;
+ else
+ iov = IOV_3V3;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
- iov = IOV_3V3;
} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & CAPA_VS18))
return -EOPNOTSUPP;
+ iov = IOV_1V8;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg |= AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
-
- iov = IOV_1V8;
} else {
return -EOPNOTSUPP;
}
@@ -682,7 +718,24 @@ static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
{
struct mmc_host *mmc = host->mmc;
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+/*
+ * MMCHS_HL_HWINFO has the MADMA_EN bit set if the controller instance
+ * is connected to L3 interconnect and is bus master capable. Note that
+ * the MMCHS_HL_HWINFO register is in the module registers before the
+ * omap registers and sdhci registers. The offset can vary for omap
+ * registers depending on the SoC. Do not use sdhci_omap_readl() here.
+ */
+static bool sdhci_omap_has_adma(struct sdhci_omap_host *omap_host, int offset)
+{
+ /* MMCHS_HL_HWINFO register is only available on omap4 and later */
+ if (offset < 0x200)
+ return false;
+
+ return readl(omap_host->base + 4) & 1;
}
static int sdhci_omap_enable_dma(struct sdhci_host *host)
@@ -792,6 +845,11 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long limit = MMC_TIMEOUT_US;
unsigned long i = 0;
+ u32 sysc;
+
+ /* Save target module sysconfig configured by SoC PM layer */
+ if (mask & SDHCI_RESET_ALL)
+ sysc = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCONFIG);
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
@@ -811,10 +869,15 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
- return;
+
+ goto restore_sysc;
}
sdhci_reset(host, mask);
+
+restore_sysc:
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCONFIG, sysc);
}
#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
@@ -877,34 +940,73 @@ static struct sdhci_ops sdhci_omap_ops = {
.set_timeout = sdhci_omap_set_timeout,
};
-static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+static unsigned int sdhci_omap_regulator_get_caps(struct device *dev,
+ const char *name)
{
- u32 reg;
- int ret = 0;
+ struct regulator *reg;
+ unsigned int caps = 0;
+
+ reg = regulator_get(dev, name);
+ if (IS_ERR(reg))
+ return ~0U;
+
+ if (regulator_is_supported_voltage(reg, 1700000, 1950000))
+ caps |= SDHCI_CAN_VDD_180;
+ if (regulator_is_supported_voltage(reg, 2700000, 3150000))
+ caps |= SDHCI_CAN_VDD_300;
+ if (regulator_is_supported_voltage(reg, 3150000, 3600000))
+ caps |= SDHCI_CAN_VDD_330;
+
+ regulator_put(reg);
+
+ return caps;
+}
+
+static int sdhci_omap_set_capabilities(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = omap_host->dev;
- struct regulator *vqmmc;
+ const u32 mask = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330;
+ unsigned int pbias, vqmmc, caps = 0;
+ u32 reg;
- vqmmc = regulator_get(dev, "vqmmc");
- if (IS_ERR(vqmmc)) {
- ret = PTR_ERR(vqmmc);
- goto reg_put;
- }
+ pbias = sdhci_omap_regulator_get_caps(dev, "pbias");
+ vqmmc = sdhci_omap_regulator_get_caps(dev, "vqmmc");
+ caps = pbias & vqmmc;
+
+ if (pbias != ~0U && vqmmc == ~0U)
+ dev_warn(dev, "vqmmc regulator missing for pbias\n");
+ else if (caps == ~0U)
+ return 0;
+
+ /*
+ * Quirk handling to allow 3.0V vqmmc with a valid 3.3V PBIAS. This is
+ * needed for 3.0V ldo9_reg on omap5 at least.
+ */
+ if (pbias != ~0U && (pbias & SDHCI_CAN_VDD_330) &&
+ (vqmmc & SDHCI_CAN_VDD_300))
+ caps |= SDHCI_CAN_VDD_330;
/* voltage capabilities might be set by boot loader, clear it */
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
- if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
- reg |= CAPA_VS33;
- if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ if (caps & SDHCI_CAN_VDD_180)
reg |= CAPA_VS18;
+ if (caps & SDHCI_CAN_VDD_300)
+ reg |= CAPA_VS30;
+
+ if (caps & SDHCI_CAN_VDD_330)
+ reg |= CAPA_VS33;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
-reg_put:
- regulator_put(vqmmc);
+ host->caps &= ~mask;
+ host->caps |= caps;
- return ret;
+ return 0;
}
static const struct sdhci_pltfm_data sdhci_omap_pdata = {
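
The rewritten sdhci_omap_set_capabilities() above advertises the intersection of the pbias and vqmmc regulator ranges, then quirks 3.3V back in when pbias supports 3.3V but vqmmc only reaches 3.0V (the omap5 ldo9_reg case). A user-space sketch of that merge; the CAN_VDD_* bit values below are made up for the example and are not the real SDHCI_CAN_VDD_* definitions:

#include <stdio.h>

#define CAN_VDD_180	(1u << 0)
#define CAN_VDD_300	(1u << 1)
#define CAN_VDD_330	(1u << 2)
#define NO_REGULATOR	(~0u)		/* what the lookup returns when the supply is absent */

static unsigned int merge_caps(unsigned int pbias, unsigned int vqmmc)
{
	unsigned int caps = pbias & vqmmc;

	/* Allow a 3.3V-capable PBIAS to pair with a 3.0V-only vqmmc supply. */
	if (pbias != NO_REGULATOR && (pbias & CAN_VDD_330) && (vqmmc & CAN_VDD_300))
		caps |= CAN_VDD_330;
	return caps;
}

int main(void)
{
	unsigned int caps = merge_caps(CAN_VDD_330 | CAN_VDD_180,
				       CAN_VDD_300 | CAN_VDD_180);

	printf("caps=%#x\n", caps);	/* 1.8V plus the quirked-in 3.3V */
	return 0;
}
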
@@ -920,26 +1022,56 @@ static const struct sdhci_pltfm_data sdhci_omap_pdata = {
.ops = &sdhci_omap_ops,
};
+static const struct sdhci_omap_data omap2430_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap3_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap4_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data omap5_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data k2g_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
};
static const struct sdhci_omap_data am335_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data am437_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data dra7_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
};
static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,omap2430-sdhci", .data = &omap2430_data },
+ { .compatible = "ti,omap3-sdhci", .data = &omap3_data },
+ { .compatible = "ti,omap4-sdhci", .data = &omap4_data },
+ { .compatible = "ti,omap5-sdhci", .data = &omap5_data },
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
{ .compatible = "ti,am335-sdhci", .data = &am335_data },
@@ -1122,6 +1254,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->power_mode = MMC_POWER_UNDEFINED;
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
+ omap_host->omap_offset = data->omap_offset;
+ omap_host->con = -EINVAL; /* Prevent invalid restore on first resume */
host->ioaddr += offset;
host->mapbase = regs->start + offset;
@@ -1172,6 +1306,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
* SYSCONFIG register of omap devices. The callback will be invoked
* as part of pm_runtime_get_sync.
*/
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
@@ -1179,10 +1315,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_rpm_disable;
}
- ret = sdhci_omap_set_capabilities(omap_host);
+ ret = sdhci_omap_set_capabilities(host);
if (ret) {
dev_err(dev, "failed to set system capabilities\n");
- goto err_put_sync;
+ goto err_rpm_put;
}
host->mmc_host_ops.start_signal_voltage_switch =
@@ -1192,16 +1328,28 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
- /* Switch to external DMA only if there is the "dmas" property */
- if (of_find_property(dev->of_node, "dmas", NULL))
+ /*
+ * Switch to external DMA only if there is the "dmas" property and
+ * ADMA is not available on the controller instance.
+ */
+ if (device_property_present(dev, "dmas") &&
+ !sdhci_omap_has_adma(omap_host, offset))
sdhci_switch_external_dma(host, true);
+ if (device_property_read_bool(dev, "ti,non-removable")) {
+ dev_warn_once(dev, "using old ti,non-removable property\n");
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
/* R1B responses are required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ /* Allow card power off and runtime PM for eMMC/SD card devices */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
+
ret = sdhci_setup_host(host);
if (ret)
- goto err_put_sync;
+ goto err_rpm_put;
ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host);
if (ret)
@@ -1211,15 +1359,38 @@ static int sdhci_omap_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
+ /*
+ * SDIO devices can use the dat1 pin as a wake-up interrupt. Some
+ * devices, like wl1xxx, use an out-of-band GPIO interrupt instead.
+ */
+ omap_host->wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (omap_host->wakeirq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_cleanup_host;
+ }
+ if (omap_host->wakeirq > 0) {
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, omap_host->wakeirq);
+ if (ret) {
+ device_init_wakeup(dev, false);
+ goto err_cleanup_host;
+ }
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_cleanup_host:
sdhci_cleanup_host(host);
-err_put_sync:
- pm_runtime_put_sync(dev);
-
+err_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
err_rpm_disable:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_pltfm_free:
@@ -1232,64 +1403,81 @@ static int sdhci_omap_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(dev);
sdhci_remove_host(host, true);
+ device_init_wakeup(dev, false);
+ dev_pm_clear_wake_irq(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ /* Ensure device gets disabled despite userspace sysfs config */
+ pm_runtime_force_suspend(dev);
sdhci_pltfm_free(pdev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+
+#ifdef CONFIG_PM
+static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE);
+ omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE);
}
-static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+/* Order matters here, HCTL must be restored in two phases */
+static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
- sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
-static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_suspend_host(host);
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
pinctrl_pm_select_idle_state(dev);
- pm_runtime_force_suspend(dev);
-
return 0;
}
-static int __maybe_unused sdhci_omap_resume(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- pm_runtime_force_resume(dev);
-
pinctrl_pm_select_default_state(dev);
- sdhci_omap_context_restore(omap_host);
+ if (omap_host->con != -EINVAL)
+ sdhci_omap_context_restore(omap_host);
- sdhci_resume_host(host);
+ sdhci_runtime_resume_host(host, 0);
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
- sdhci_omap_resume);
+
+static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_omap_runtime_suspend,
+ sdhci_omap_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index be19785227fe..6f9877546830 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -17,8 +17,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -26,11 +24,13 @@
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif
@@ -345,73 +345,6 @@ static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-#ifdef CONFIG_PM
-
-static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
-{
- struct sdhci_pci_slot *slot = dev_id;
- struct sdhci_host *host = slot->host;
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
- return IRQ_HANDLED;
-}
-
-static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
- int err, irq, gpio = slot->cd_gpio;
-
- slot->cd_gpio = -EINVAL;
- slot->cd_irq = -EINVAL;
-
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
- if (err < 0)
- goto out;
-
- err = gpio_direction_input(gpio);
- if (err < 0)
- goto out_free;
-
- irq = gpio_to_irq(gpio);
- if (irq < 0)
- goto out_free;
-
- err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "sd_cd", slot);
- if (err)
- goto out_free;
-
- slot->cd_gpio = gpio;
- slot->cd_irq = irq;
-
- return;
-
-out_free:
- devm_gpio_free(&slot->chip->pdev->dev, gpio);
-out:
- dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
-}
-
-static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
- if (slot->cd_irq >= 0)
- free_irq(slot->cd_irq, slot);
-}
-
-#else
-
-static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-#endif
-
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
@@ -619,23 +552,16 @@ static int intel_select_drive_strength(struct mmc_card *card,
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
+ return sdhci_get_cd_nogpio(mmc);
+}
- return ret;
+static int mrfld_get_cd(struct mmc_host *mmc)
+{
+ return sdhci_get_cd_nogpio(mmc);
}
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
@@ -1341,6 +1267,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
MMC_CAP_1_8V_DDR;
break;
case INTEL_MRFLD_SD:
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ /*
+ * There are two PCB designs of SD card slot with the opposite
+ * card detection sense. Quirk this out by ignoring GPIO state
+ * completely in the custom ->get_cd() callback.
+ */
+ slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
@@ -1981,21 +1915,6 @@ int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
-{
- struct sdhci_pci_slot *slot = sdhci_priv(host);
- int rst_n_gpio = slot->rst_n_gpio;
-
- if (!gpio_is_valid(rst_n_gpio))
- return;
- gpio_set_value_cansleep(rst_n_gpio, 0);
- /* For eMMC, minimum is 1us but give it 10us for good measure */
- udelay(10);
- gpio_set_value_cansleep(rst_n_gpio, 1);
- /* For eMMC, minimum is 200us but give it 300us for good measure */
- usleep_range(300, 1000);
-}
-
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -2126,26 +2045,8 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
- slot->rst_n_gpio = -EINVAL;
- slot->cd_gpio = -EINVAL;
slot->cd_idx = -1;
- /* Retrieve platform data if there is any */
- if (*sdhci_pci_get_data)
- slot->data = sdhci_pci_get_data(pdev, slotno);
-
- if (slot->data) {
- if (slot->data->setup) {
- ret = slot->data->setup(slot->data);
- if (ret) {
- dev_err(&pdev->dev, "platform setup failed\n");
- goto free;
- }
- }
- slot->rst_n_gpio = slot->data->rst_n_gpio;
- slot->cd_gpio = slot->data->cd_gpio;
- }
-
host->hw_name = "PCI";
host->ops = chip->fixes && chip->fixes->ops ?
chip->fixes->ops :
@@ -2169,17 +2070,6 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
goto cleanup;
}
- if (gpio_is_valid(slot->rst_n_gpio)) {
- if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
- gpio_direction_output(slot->rst_n_gpio, 1);
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- slot->hw_reset = sdhci_pci_gpio_hw_reset;
- } else {
- dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
- slot->rst_n_gpio = -EINVAL;
- }
- }
-
host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
@@ -2214,15 +2104,11 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (ret)
goto remove;
- sdhci_pci_add_own_cd(slot);
-
/*
* Check if the chip needs a separate GPIO for card detect to wake up
* from runtime suspend. If it is not there, don't allow runtime PM.
- * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
*/
- if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
- !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
@@ -2232,10 +2118,6 @@ remove:
chip->fixes->remove_slot(slot, 0);
cleanup:
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
-free:
sdhci_free_host(host);
return ERR_PTR(ret);
@@ -2246,8 +2128,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
- sdhci_pci_remove_own_cd(slot);
-
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
@@ -2258,9 +2138,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
sdhci_free_host(slot->host);
}
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
deleted file mode 100644
index 18638fb363d8..000000000000
--- a/drivers/mmc/host/sdhci-pci-data.c
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/mmc/sdhci-pci-data.h>
-
-struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
-EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 51d55a87aebe..f045c1ee4667 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -489,7 +489,7 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
if (!ret) {
- pr_info("%s: unsupport msi, use INTx irq\n",
+ pr_info("%s: unsupported MSI, use INTx irq\n",
mmc_hostname(host->mmc));
return;
}
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 8f90c4163bb5..5e3193278ff9 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -156,11 +156,6 @@ struct sdhci_pci_fixes {
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
int cd_idx;
bool cd_override_level;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 862f033d235d..9085f3932443 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -791,4 +791,3 @@ module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 11e375579cfb..f33e9349e4e6 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -39,6 +40,9 @@
#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21)
#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29)
+#define SDHCI_SPRD_REG_32_DLL_STS0 0x210
+#define SDHCI_SPRD_DLL_LOCKED BIT(18)
+
#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250
#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25)
#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24)
@@ -256,6 +260,15 @@ static void sdhci_sprd_enable_phy_dll(struct sdhci_host *host)
sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG);
/* wait 1ms */
usleep_range(1000, 1250);
+
+ if (read_poll_timeout(sdhci_readl, tmp, (tmp & SDHCI_SPRD_DLL_LOCKED),
+ 2000, USEC_PER_SEC, false, host, SDHCI_SPRD_REG_32_DLL_STS0)) {
+ pr_err("%s: DLL locked fail!\n", mmc_hostname(host->mmc));
+ pr_info("%s: DLL_STS0 : 0x%x, DLL_CFG : 0x%x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_STS0),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG));
+ }
}
static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8eefa7d5fe85..269c86569402 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -930,7 +930,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
struct mmc_data *data;
unsigned target_timeout, current_timeout;
- *too_big = true;
+ *too_big = false;
/*
* If the host controller provides us with an incorrect timeout
@@ -941,7 +941,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return host->max_timeout_count;
- /* Unspecified command, asume max */
+ /* Unspecified command, assume max */
if (cmd == NULL)
return host->max_timeout_count;
@@ -968,17 +968,14 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
- if (count > host->max_timeout_count)
+ if (count > host->max_timeout_count) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
+ DBG("Too large timeout 0x%x requested for CMD%d!\n",
+ count, cmd->opcode);
+ count = host->max_timeout_count;
+ *too_big = true;
break;
- }
-
- if (count > host->max_timeout_count) {
- if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
- DBG("Too large timeout 0x%x requested for CMD%d!\n",
- count, cmd->opcode);
- count = host->max_timeout_count;
- } else {
- *too_big = false;
+ }
}
return count;
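
The restructured loop above now starts with *too_big = false and only sets it when the doubling count really exceeds host->max_timeout_count, instead of the old inverted default. A stand-alone sketch of the corrected control flow; the values in main() are illustrative and not taken from sdhci.c:

#include <stdbool.h>
#include <stdio.h>

static unsigned int calc_count(unsigned long long current_timeout,
			       unsigned long long target_timeout,
			       unsigned int max_count, bool *too_big)
{
	unsigned int count = 0;

	*too_big = false;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > max_count) {
			count = max_count;
			*too_big = true;
			break;
		}
	}
	return count;
}

int main(void)
{
	bool too_big;
	unsigned int count = calc_count(1, 200, 14, &too_big);

	/* 8 doublings take the base value past the target without hitting the cap */
	printf("count=%u too_big=%d\n", count, too_big);
	return 0;
}
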
@@ -2042,6 +2039,12 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
break;
case MMC_VDD_32_33:
case MMC_VDD_33_34:
+ /*
+ * 3.4 ~ 3.6V are valid only for those platforms where it's
+ * known that the voltage range is supported by hardware.
+ */
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
pwr = SDHCI_POWER_330;
break;
default:
@@ -2422,6 +2425,25 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
+int sdhci_get_cd_nogpio(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -3232,7 +3254,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
- if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e8d04e42a5af..bb883553d3b4 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -750,7 +750,6 @@ static inline void *sdhci_priv(struct sdhci_host *host)
return host->private;
}
-void sdhci_card_detect(struct sdhci_host *host);
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
const u32 *caps, const u32 *caps1);
int sdhci_setup_host(struct sdhci_host *host);
@@ -775,6 +774,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+int sdhci_get_cd_nogpio(struct mmc_host *mmc);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 7dfc26f48c18..e2affa52ef46 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -195,6 +195,10 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
@@ -956,8 +960,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
host->reset(host);
+
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE |
+ TMIO_STAT_CARD_INSERT);
+ }
+
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
@@ -1185,10 +1196,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->set_clock(_host, 0);
tmio_mmc_reset(_host);
- if (_host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(_host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 4950d10d3a19..97beece62fec 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -576,7 +576,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (sizeof(vub300->system_port_status) == retval)
new_system_port_status(vub300);
}
@@ -1241,7 +1241,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_INTERRUPT_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1284,7 +1284,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_TRANSFER_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1991,7 +1991,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_CLOCK_SPEED,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, 0x00, buf, buf_array_size, HZ);
+ 0x00, 0x00, buf, buf_array_size, 1000);
if (retval != 8) {
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz failed with retval=%d\n", kHzClock, retval);
@@ -2013,14 +2013,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, 0x0000, NULL, 0, HZ);
+ 0x0000, 0x0000, NULL, 0, 1000);
/* must wait for the VUB300 u-proc to boot up */
msleep(600);
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0001, 0x0000, NULL, 0, HZ);
+ 0x0001, 0x0000, NULL, 0, 1000);
msleep(600);
vub300->card_powered = 1;
} else if (ios->power_mode == MMC_POWER_ON) {
@@ -2275,14 +2275,14 @@ static int vub300_probe(struct usb_interface *interface,
GET_HC_INF0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->hc_info,
- sizeof(vub300->hc_info), HZ);
+ sizeof(vub300->hc_info), 1000);
if (retval < 0)
goto error5;
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+ firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
if (retval < 0)
goto error5;
dev_info(&vub300->udev->dev,
@@ -2297,7 +2297,7 @@ static int vub300_probe(struct usb_interface *interface,
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
goto error4;
} else if (sizeof(vub300->system_port_status) == retval) {
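
The vub300 hunks swap HZ for a literal 1000 because the last argument of usb_control_msg() is a timeout in milliseconds, not jiffies; with CONFIG_HZ=100 the old code waited only 100 ms. A hedged illustration of the convention, with a placeholder device and request:

#include <linux/usb.h>

#define EXAMPLE_CTRL_TIMEOUT_MS	1000	/* milliseconds, independent of CONFIG_HZ */

static int example_vendor_cmd(struct usb_device *udev)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       0x01,	/* bRequest: placeholder vendor command */
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0x0000, 0x0000, NULL, 0,
			       EXAMPLE_CTRL_TIMEOUT_MS);
}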
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index b8ae1ec14e17..4eaba6f4ec68 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -384,7 +384,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd, NULL);
+ ret = device_add_disk(&new->mtd->dev, gd, NULL);
+ if (ret)
+ goto out_cleanup_disk;
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -393,6 +395,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 38b6aa849c63..5ff001140ef4 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
#include <linux/fs_context.h>
#include "mtdcore.h"
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7734229aa078..12b60ad95b02 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -290,31 +290,33 @@ struct flexcan_regs {
u32 dbg1; /* 0x58 */
u32 dbg2; /* 0x5c */
u32 _reserved3[8]; /* 0x60 */
- u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
- /* FIFO-mode:
- * MB
- * 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserved
- * 0x0e0...0x0ff 6-7 8 entry ID table
- * (mx25, mx28, mx35, mx53)
- * 0x0e0...0x2df 6-7..37 8..128 entry ID table
- * size conf'ed via ctrl2::RFFN
- * (mx6, vf610)
- */
- u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
- u32 _reserved5[24]; /* 0x980 */
- u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[39]; /* 0x9e4 */
- u32 _rxfir[6]; /* 0xa80 */
- u32 _reserved8[2]; /* 0xa98 */
- u32 _rxmgmask; /* 0xaa0 */
- u32 _rxfgmask; /* 0xaa4 */
- u32 _rx14mask; /* 0xaa8 */
- u32 _rx15mask; /* 0xaac */
- u32 tx_smb[4]; /* 0xab0 */
- u32 rx_smb0[4]; /* 0xac0 */
- u32 rx_smb1[4]; /* 0xad0 */
+ struct_group(init,
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size conf'ed via ctrl2::RFFN
+ * (mx6, vf610)
+ */
+ u32 _reserved4[256]; /* 0x480 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
+ u32 _reserved5[24]; /* 0x980 */
+ u32 gfwr_mx6; /* 0x9e0 - MX6 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
+ );
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -328,9 +330,11 @@ struct flexcan_regs {
u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
u32 fdcrc; /* 0xc08 */
u32 _reserved9[199]; /* 0xc0c */
- u32 tx_smb_fd[18]; /* 0xf28 */
- u32 rx_smb0_fd[18]; /* 0xf70 */
- u32 rx_smb1_fd[18]; /* 0xfb8 */
+ struct_group(init_fd,
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
+ );
};
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
@@ -1400,14 +1404,10 @@ static void flexcan_ram_init(struct net_device *dev)
reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
- memset_io(&regs->mb[0][0], 0,
- offsetof(struct flexcan_regs, rx_smb1[3]) -
- offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+ memset_io(&regs->init, 0, sizeof(regs->init));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- memset_io(&regs->tx_smb_fd[0], 0,
- offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
- offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+ memset_io(&regs->init_fd, 0, sizeof(regs->init_fd));
reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
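
struct_group() from <linux/stddef.h> wraps a run of members in a tagged anonymous union/struct pair, so a bulk memset_io()/memcpy() can be bounded by the group's sizeof() while every member keeps its original name and offset. A self-contained sketch of the idiom; the register names are illustrative:

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_regs {
	u32 ctrl;
	struct_group(init,		/* members below stay addressable by name */
		u32 cfg;
		u32 buf[4];
	);
	u32 status;
};

static void example_ram_init(struct example_regs __iomem *regs)
{
	/* Bounded by the group, so fortified memory checks can verify it. */
	memset_io(&regs->init, 0, sizeof(regs->init));
}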
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 308d4f2fff00..eee47bad0592 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->mram_base + offset;
- ioread32_rep(priv->mram_base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->mram_base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 00e4533c8bdd..8999ec9455ec 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
u16 ctlr;
int err;
+ if (!netif_running(ndev))
+ return 0;
+
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- if (netif_running(ndev)) {
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+
return 0;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6db90dc4bc9d..84f34020aafb 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ /* do that only for first channel */
+ if (!prev_dev && chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
- if (!dev) {
- /* do that only for first channel */
- if (chan->pciec_card)
- peak_pciec_remove(chan->pciec_card);
+ if (!dev)
break;
- }
priv = netdev_priv(dev);
chan = priv->priv;
}
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h
index 4bc60a6df697..667ecb77168c 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.h
+++ b/drivers/net/can/usb/etas_es58x/es581_4.h
@@ -192,7 +192,7 @@ struct es581_4_urb_cmd {
struct es581_4_rx_cmd_ret rx_cmd_ret;
__le64 timestamp;
u8 rx_cmd_ret_u8;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
index a191891b8777..c4b19a6a33ae 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -219,7 +219,7 @@ struct es58x_fd_urb_cmd {
struct es58x_fd_tx_ack_msg tx_ack_msg;
__le64 timestamp;
__le32 rx_cmd_ret_le32;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
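
DECLARE_FLEX_ARRAY() exists because C does not allow a flexible array member to be the sole member of a struct or to sit directly inside a union; the macro wraps the array so "u8 raw_msg[]" keeps its flexible-array semantics in those positions. A reduced sketch with illustrative type and field names:

#include <linux/stddef.h>
#include <linux/types.h>

union example_payload {
	__le32 status_word;
	/* A bare "u8 raw[];" would be rejected here; the helper wraps it. */
	DECLARE_FLEX_ARRAY(u8, raw);
};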
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index b11eabad575b..09029a3bad1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
- /* no error bit (so, no error skb, back to active state) */
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* back to (or still in) ERROR_ACTIVE state */
+ new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
- return 0;
}
/* state hasn't changed */
@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
- if (skb)
- can_change_state(netdev, cf, tx_state, rx_state);
+ can_change_state(netdev, cf, tx_state, rx_state);
/* things must be done even in case of OOM */
if (new_state == CAN_STATE_BUS_OFF)
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 3ff4b7e177f3..dbd4486a173f 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -230,7 +230,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 094737e5084a..9890672a206d 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
mutex_lock(&priv->reg_mutex);
/* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = DSA_MAX_PORTS;
+ priv->ds->num_ports = MT7530_NUM_PORTS;
/* Use medatek,mcm property to distinguish hardware type that would
* casues a little bit differences on power-on sequence.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 228a5db7e143..217ff597cdf2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -159,10 +159,10 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
int tc;
- memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 6eed231de565..716742522161 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -23,13 +23,15 @@ struct bnxt_dcb {
struct bnxt_cos2bw_cfg {
u8 pad[3];
- u8 queue_id;
- __le32 min_bw;
- __le32 max_bw;
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- u8 tsa;
- u8 pri_lvl;
- u8 bw_weight;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
u8 unused;
};
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 691e1475d55e..0fbecd093fa1 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
dev_err(&nic->pdev->dev,
"Request for #%d msix vectors failed, returned %d\n",
nic->num_vec, ret);
- return 1;
+ return ret;
}
/* Register mailbox interrupt handler */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d1667b759522..a27227aeae88 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (ret < 0) {
netdev_err(nic->netdev,
"Req for #%d msix vectors failed\n", nic->num_vec);
- return 1;
+ return ret;
}
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (!nicvf_check_pf_ready(nic)) {
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
- return 1;
+ return -EIO;
}
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 9690e36e9e85..910b9f722504 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -157,7 +157,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frames" },
+ { ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 4c977dfc44f0..d522bd5c90b4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
static void enetc_configure_port_mac(struct enetc_hw *hw)
{
+ int tc;
+
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ for (tc = 0; tc < 8; tc++)
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index eef1b2764d34..67b0bf310daa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
/* we are keeping things simple and using single lock for all the
* list. This is a non-critical code so other updations, if happen
* in parallel, can wait.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8ba21d6dc220..d701451596c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -853,6 +853,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 2b66c59f5eaf..e54f96251fea 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@ -462,7 +462,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@@ -565,7 +565,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@ -790,13 +790,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 5 },
- { "ADDRESS", 2 },
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
- { "TV", 2 },
+ { "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 468b8f07bf47..4b886a13e079 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
- u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb' fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len >
- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
- if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
- bd_num))
+ if (hns3_skb_linearize(ring, skb, bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
+ ring->desc_cb[i].refill = 1;
return 0;
}
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
- break;
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
}
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
ring->desc[ring->next_to_clean].rx.bd_base_info &=
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
ring->next_to_clean += 1;
if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
int recv_pkts = 0;
int err;
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring, unused_count);
- unused_count = hns3_desc_unused(ring) -
- ring->pending_buf;
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
}
/* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
- /* Make all data has been write before submit */
- if (unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, unused_count);
-
- return recv_pkts;
+ return failure ? budget : recv_pkts;
}
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 6162d9f88e37..f09a61d9c626 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -186,11 +186,9 @@ enum hns3_nic_state {
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
- (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -332,6 +330,7 @@ struct hns3_desc_cb {
u32 length; /* length of the buffer */
u16 reuse_flag;
+ u16 refill;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 307c9e830510..91cb578f56b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ /* The hardware will switch to sp mode if bandwidth is
+ * 0, so limit ets bandwidth must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 32f62cd2dd99..9cda8b3562b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
- qset_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2, bitmap->bit3);
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
}
return 0;
@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pri_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pg_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- bitmap->bit0);
+ req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- bitmap->bit1);
+ req.bit1);
return 0;
}
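
The debugfs changes above stop aliasing the little-endian descriptor word through a pointer cast and instead copy the low byte into a local command structure via le32_to_cpu(), keeping the dump correct on big-endian kernels. A reduced sketch of the pattern; the union layout is an assumption that merely mirrors the bit0..bit3 names used above:

#include <linux/kernel.h>
#include <linux/types.h>

struct example_bitmap_cmd {
	union {
		u8 bitmap;
		struct {
			u8 bit0:1, bit1:1, bit2:1, bit3:1, rsvd:4;
		};
	};
};

static void example_dump(__le32 desc_word)
{
	struct example_bitmap_cmd req;

	req.bitmap = (u8)le32_to_cpu(desc_word);	/* copy, don't cast */
	pr_info("bit0=%#x bit1=%#x\n", req.bit0, req.bit1);
}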
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index bb9b026ae88e..93aa7f2bdc13 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure TM QCN hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
- if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 07987fb8332e..d811eeefe2c0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -50,6 +50,8 @@
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f5b8d1fee0f1..d891390d492f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2847,33 +2847,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@ -3491,33 +3487,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@@ -13052,7 +13029,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -13065,6 +13042,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}
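
Dropping mod_delayed_work_on() together with the affinity notifier pairs with allocating the workqueue as WQ_UNBOUND, so the scheduler rather than a pinned CPU decides where the service task runs. A small sketch of that combination, with placeholder names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_service_task(struct work_struct *work)
{
	/* periodic service work would go here */
}
static DECLARE_DELAYED_WORK(example_work, example_service_task);

static int example_init(void)
{
	example_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "example");
	if (!example_wq)
		return -ENOMEM;

	/* No CPU argument: any unbound worker may pick this up. */
	mod_delayed_work(example_wq, &example_work, msecs_to_jiffies(100));
	return 0;
}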
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index de6afbcbfbac..69cd8f87b4c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -944,7 +944,6 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index f314dbd3ce11..95074e91a846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5fdac8685f95..cf00ad7bb881 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@ -2273,9 +2274,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
- while ((hdev->reset_type =
- hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET)
+ hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
@@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 883130a9b48f..28288d7e3303 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -146,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
HCLGEVF_STATE_ROCE_REGISTERED,
+ HCLGEVF_STATE_SERVICE_INITED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 5b2143f4b1f8..3178efd98006 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -113,7 +113,8 @@ enum e1000_boards {
board_pch2lan,
board_pch_lpt,
board_pch_spt,
- board_pch_cnp
+ board_pch_cnp,
+ board_pch_tgp
};
struct e1000_ps_page {
@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 60c582a16821..5e4fc9b4e2ad 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 ctrl_ext, txdctl, snoop;
+ u32 ctrl_ext, txdctl, snoop, fflt_dbg;
s32 ret_val;
u16 i;
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
+ /* Enable workaround for packet loss issue on TGP PCH
+ * Do not gate DMA clock from the modPHY block
+ */
+ if (mac->type >= e1000_pch_tgp) {
+ fflt_dbg = er32(FFLT_DBG);
+ fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+ ew32(FFLT_DBG, fflt_dbg);
+ }
+
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_tgp_info = {
+ .mac = e1000_pch_tgp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index d6a092e5ee74..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -289,6 +289,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
+
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 900b3ab998bd..ebcb2a30add0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
+ [board_pch_tgp] = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
@@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
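
Each entry's driver_data in e1000_pci_tbl[] is a board_* enum that probe uses to index e1000_info_tbl[], so pointing the TGP/ADP/MTP/LNP/RPL device IDs at board_pch_tgp routes them to the new e1000_pch_tgp_info above. A simplified sketch of that lookup, not code added by this patch:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* ent->driver_data carries the board_* value from the PCI table */
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];

	return ei ? 0 : -ENODEV;
}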
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2fb81e359cdf..df5ad4de1f00 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9d8194671f6a..ef4392e6e244 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -21,6 +21,10 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 14afce82ef63..da7288bdc9a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+ hw->api_min_ver, hw->api_patch);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 06ac9badee77..1ac96dc66d0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type &&
- idx--)
+ idx-- == 0)
return i;
WARN_ON_ONCE(1);
@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
- index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
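
The ice_tunnel_idx_to_entry() fix hinges on post-decrement returning the old value: with idx == 0, plain "idx--" evaluates false and the loop walks past the entry it should return, whereas "idx-- == 0" selects exactly the idx-th valid match. A tiny illustration of the corrected idiom:

#include <linux/types.h>

/* Return the index of the idx-th set entry, or -1; mirrors the fixed test. */
static int example_nth_match(const bool *valid, int n, int idx)
{
	int i;

	for (i = 0; i < n; i++)
		if (valid[i] && idx-- == 0)	/* compare old value, then decrement */
			return i;
	return -1;
}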
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 37c18c66b5c7..e375ac849aec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct net_device *event_netdev, *netdev_tmp;
struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
goto lag_out;
}
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
- if (!netif_is_ice(netdev_tmp))
- continue;
-
- if (netdev_tmp && netdev_tmp != lag->netdev &&
- lag->peer_netdev != netdev_tmp) {
- dev_hold(netdev_tmp);
- lag->peer_netdev = netdev_tmp;
- }
- }
- rcu_read_unlock();
-
if (bonding_info->slave.state)
ice_lag_set_backup(lag);
else
@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
case NETDEV_BONDING_INFO:
ice_lag_info_event(lag, ptr);
break;
+ case NETDEV_UNREGISTER:
+ ice_lag_unlink(lag, ptr);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index dde9802c6c72..b718e196af2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
+ enum ice_status err;
struct ice_pf *pf;
if (!vsi->back)
@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (ret)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, ret);
ice_vsi_free_q_vectors(vsi);
/* SR-IOV determines needed MSIX resources all at once instead of per
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0d6c143f6653..06fa93e597fb 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
ice_unplug_aux_dev(pf);
- ida_free(&ice_aux_ida, pf->aux_idx);
+ if (pf->aux_idx >= 0)
+ ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
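
Initialising pf->aux_idx to -1 at the top of probe and checking it before ida_free() keeps remove from freeing an ID that was never allocated when probe bails out early. The general guard pattern, sketched with placeholder names:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

struct example_pf {
	int aux_idx;	/* -1 means "never allocated" */
};

static int example_probe(struct example_pf *pf, bool early_failure)
{
	pf->aux_idx = -1;		/* set before any early error exit */

	if (early_failure)		/* stands in for any probe step that may fail */
		return -EIO;

	pf->aux_idx = ida_alloc(&example_ida, GFP_KERNEL);
	return pf->aux_idx < 0 ? pf->aux_idx : 0;
}

static void example_remove(struct example_pf *pf)
{
	if (pf->aux_idx >= 0)		/* skip if probe bailed out early */
		ida_free(&example_ida, pf->aux_idx);
}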
@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 80380aed8882..d1ef3d48a4b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1571,6 +1571,9 @@ err_kworker:
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 9f07b6641705..2d9b10277186 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
* ice_get_agg_info - get the aggregator ID
* @hw: pointer to the hardware structure
* @agg_id: aggregator ID
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9beef8f0ec76..fdf7a5882f07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -89,6 +89,7 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
enum ice_status
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 4461f8b9a864..4e0203336c6b 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -22,8 +22,8 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
-#define IGC_DEV_ID_I226_K 0x5504
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9338765da048..49d822a98ada 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -226,18 +226,85 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+{
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+}
+
+static int get_max_column_width(struct rvu *rvu)
+{
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+}
+
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
- int index, off = 0, flag = 0, go_back = 0, len = 0;
+ int index, off = 0, flag = 0, len = 0, i = 0;
struct rvu *rvu = filp->private_data;
- int lf, pf, vf, pcifunc;
+ int bytes_not_copied = 0;
struct rvu_block block;
- int bytes_not_copied;
- int lf_str_size = 12;
+ int pf, vf, pcifunc;
int buf_size = 2048;
+ int lf_str_size;
char *lfs;
char *buf;
@@ -249,6 +316,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
if (!buf)
return -ENOSPC;
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
@@ -262,65 +332,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
}
- off += go_back;
- for (index = 0; index < BLKTYPE_MAX; index++) {
+ for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
len = 0;
lfs[len] = '\0';
- for (lf = 0; lf < block.lf.max; lf++) {
- if (block.fn_map[lf] != pcifunc)
- continue;
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
flag = 1;
- len += sprintf(&lfs[len], "%d,", lf);
- }
- if (flag)
- len--;
- lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
- if (!strlen(lfs))
- go_back += lf_str_size;
}
- if (!flag)
- off -= go_back;
- else
- flag = 0;
- off--;
- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
}
}
- bytes_not_copied = copy_to_user(buffer, buf, off);
+out:
kfree(lfs);
kfree(buf);
-
if (bytes_not_copied)
return -EFAULT;
- *ppos = off;
- return off;
+ return *ppos;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@@ -504,7 +578,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
@@ -1719,6 +1793,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
u16 pcifunc;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
@@ -1768,6 +1846,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
int layer;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
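
The get_lf_str_list() helper added above collapses consecutive LF indices into "a-b" ranges before printing the debugfs table. A self-contained user-space sketch of the same formatting idea (function and variable names here are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

/* Render a sorted list of indices as "0-2,5,7-8" style ranges. */
static void format_lf_ranges(const int *lf, int count, char *out, size_t outsz)
{
	int i, start, len = 0;

	out[0] = '\0';
	for (i = 0; i < count; ) {
		start = lf[i];
		/* Extend the run while indices stay consecutive. */
		while (i + 1 < count && lf[i + 1] == lf[i] + 1)
			i++;
		if (lf[i] == start)
			len += snprintf(out + len, outsz - len, "%s%d",
					len ? "," : "", start);
		else
			len += snprintf(out + len, outsz - len, "%s%d-%d",
					len ? "," : "", start, lf[i]);
		i++;
	}
}

int main(void)
{
	int lfs[] = { 0, 1, 2, 5, 7, 8 };
	char buf[64];

	format_lf_ranges(lfs, 6, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints 0-2,5,7-8 */
	return 0;
}
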
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 9ef4e942e31e..6970540dc470 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2507,6 +2507,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 41684a6c44e9..a88a1a48229f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b4e986818794..4a13ef561587 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -10,6 +10,8 @@
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+#include "lag.h"
+#include "lag_mp.h"
struct mlx5e_tc_tun_route_attr {
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 33de8f0092a6..fb5397324aa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* Pkt: MAC IP ESP IP L4
*
* Transport Mode:
- * SWP: OutL3 InL4
- * InL3
+ * SWP: OutL3 OutL4
* Pkt: MAC IP ESP L4
*
* Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
return;
if (!xo->inner_ipproto) {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (xo->proto == IPPROTO_UDP)
+ switch (xo->proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | TCP */
+ eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- return;
- }
-
- /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
- switch (xo->inner_ipproto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- fallthrough;
- case IPPROTO_TCP:
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- break;
- default:
- break;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
}
- return;
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index c06b4b938ae7..d226cc5ab1d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
struct mlx5e_flow_table *ft;
int err;
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
- return -ENOMEM;
-
ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- goto err_free_t;
- }
+ if (IS_ERR(ft->t))
+ return PTR_ERR(ft->t);
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
-err_free_t:
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
return err;
}
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
- kvfree(priv->fs.vlan);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
+}
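
The en_fs.c change above moves the VLAN table allocation out of the flow-steering constructor into a dedicated mlx5e_fs_init()/mlx5e_fs_cleanup() pair, so the object exists for the whole profile lifetime instead of only between table create and destroy. A minimal sketch of that lifetime split (illustrative names; the driver uses kvzalloc/kvfree, plain kzalloc/kfree is used here for simplicity):

#include <linux/slab.h>

struct example_vlan_table {
	int refcnt;
};

struct example_fs {
	struct example_vlan_table *vlan;
};

/* Called early in profile init, before any table is created. */
static int example_fs_init(struct example_fs *fs)
{
	fs->vlan = kzalloc(sizeof(*fs->vlan), GFP_KERNEL);
	if (!fs->vlan)
		return -ENOMEM;
	return 0;
}

/* Called last in profile cleanup, after all tables are gone. */
static void example_fs_cleanup(struct example_fs *fs)
{
	kfree(fs->vlan);
	fs->vlan = NULL;
}
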
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 09c8b71b186c..41ef6eb70a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ err = mlx5e_fs_init(priv);
+ if (err) {
+ mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ return err;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_fs_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ba8164792016..129ff7e0d65c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -67,6 +67,8 @@
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
+#include "lag.h"
+#include "lag_mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c63d78eda606..188994d091c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (skb->encapsulation) {
- eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (xo->inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
- } else {
- sq->stats->csum_partial++;
}
}
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+ return;
+ }
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
sq->stats->csum_none++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 985e305179d1..c6cc67cb4f6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
err_min_rate:
list_del(&group->list);
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
- if (err)
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ group->tsar_ix))
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
kfree(group);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index ca5690b0a7ab..d2105c1635c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
+ /* VF LAG is in multipath mode, ignore bond change requests */
+ if (mlx5_lag_is_multipath(dev0))
+ return;
+
tracker = ldev->tracker;
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index f239b352a58a..21fdaf708f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -9,20 +9,23 @@
#include "eswitch.h"
#include "lib/mlx5.h"
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
if (!mlx5_lag_is_ready(ldev))
return false;
+ if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+ return false;
+
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
ldev->pf[MLX5_LAG_P2].dev);
}
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 729c839397a8..dea199e79bed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 13b0259f7ea6..fcace73eae40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
struct sk_buff *skb;
int err;
- elem_info->u.rdq.skb = NULL;
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
if (!skb)
return -ENOMEM;
- /* Assume that wqe was previously zeroed. */
-
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
buf_len, DMA_FROM_DEVICE);
if (err)
@@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char *wqe;
+ char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.sdq.skb;
- if (!skb)
- return;
- wqe = elem_info->elem;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb = elem_info->u.rdq.skb;
+ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err) {
+ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ goto out;
+ }
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
@@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
- memset(wqe, 0, q->elem_size);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
- if (err)
- dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+out:
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 9e8561cdc32a..4d5a5d6595b3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
@@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ gfp_t gfp)
{
struct net_device *netdev = rx->adapter->netdev;
struct device *dev = &rx->adapter->pdev->dev;
@@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
if (!skb)
return -ENOMEM;
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head,
+ GFP_ATOMIC | GFP_DMA)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_init_ring_element(rx, index);
+ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
if (ret)
goto cleanup;
}
return 0;
cleanup:
+ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+ "Error allocating memory for LAN743x\n");
+
lan743x_rx_ring_cleanup(rx);
return ret;
}
@@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
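
The lan743x hunks above add the usual try-64-bit-then-fall-back-to-32-bit DMA mask setup at ring init. A condensed sketch of that pattern, assuming only a generic struct device pointer:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev)
{
	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;

	dev_warn(dev, "no suitable DMA configuration available\n");
	return -ENOMEM;
}
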
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index cbece6e9bff2..5030dfca3879 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
"port %u: missing serdes\n",
portno);
+ of_node_put(portnp);
goto cleanup_config;
}
config->portno = portno;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 291ae6817c26..d51f799e4e86 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
err = PTR_ERR(target);
+ of_node_put(portnp);
goto out_teardown;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 11c83a99b014..f469950c7265 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -182,15 +182,21 @@ static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- unsigned int max_mtu;
+ struct nfp_bpf_vnic *bv;
+ struct bpf_prog *prog;
if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return 0;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (new_mtu > max_mtu) {
- nn_info(nn, "BPF offload active, MTU over %u not supported\n",
- max_mtu);
+ if (nn->xdp_hw.prog) {
+ prog = nn->xdp_hw.prog;
+ } else {
+ bv = nn->app_priv;
+ prog = bv->tc_prog;
+ }
+
+ if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+ nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
return -EBUSY;
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index d0e17eebddd9..16841bb750b7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu);
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 53851853562c..9d97cd281f18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return 0;
}
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu)
+{
+ unsigned int fw_mtu, pkt_off;
+
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+ return fw_mtu < pkt_off;
+}
+
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+ unsigned int max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
- if (fw_mtu < pkt_off) {
+ if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 2643ea5948f4..154399c5453f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d29fe562b3de..c910fa2f40a4 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46a6ff9a782d..2918947dd57c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -157,6 +157,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8129) },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
{ PCI_VDEVICE(REALTEK, 0x8161) },
+ { PCI_VDEVICE(REALTEK, 0x8162) },
{ PCI_VDEVICE(REALTEK, 0x8167) },
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 4bd3ef8f3384..c4fe3c48ac46 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(1000baseX_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+ SET_BIT(10000baseCR_Full);
+ SET_BIT(10000baseLR_Full);
+ SET_BIT(10000baseSR_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(40000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(100000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
SET_BIT(25000baseCR_Full);
+ SET_BIT(25000baseSR_Full);
+ }
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
SET_BIT(50000baseCR2_Full);
break;
@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+ TEST_BIT(1000baseX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+ TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+ TEST_BIT(10000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+ TEST_BIT(40000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
+ if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
+ if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
if (TEST_BIT(50000baseCR2_Full))
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a39c5143b386..797e51802ccb 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
} else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
} else if (rc == -EPERM) {
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
return rc;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
* should only have been called during probe.
*/
if (rc == -ENOSYS || rc == -EPERM)
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 83dcfcae3d4b..441e7f3e5375 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
return;
if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
- netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
+ pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
return;
}
if (count > 0 && count > max_vfs)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eb3b7bf771d7..3d67d1fa3690 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- if (priv->synopsys_id != DWMAC_CORE_5_10)
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 775dcf4ebde5..6b6f28d5b8d5 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
/* --------------------------------------------------------------------- */
-#ifdef __i386__
+#if defined(__i386__) && !defined(CONFIG_UML)
#include <asm/msr.h>
#define GETTICK(x) \
({ \
if (boot_cpu_has(X86_FEATURE_TSC)) \
x = (unsigned int)rdtsc(); \
})
-#else /* __i386__ */
+#else /* __i386__ && !CONFIG_UML */
#define GETTICK(x)
-#endif /* __i386__ */
+#endif /* __i386__ && !CONFIG_UML */
static void epp_bh(struct work_struct *work)
{
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f124a8a58bd4..a3bfb156c83d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
}
}
-int phy_ethtool_ksettings_set(struct phy_device *phydev,
- const struct ethtool_link_ksettings *cmd)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
- u8 autoneg = cmd->base.autoneg;
- u8 duplex = cmd->base.duplex;
- u32 speed = cmd->base.speed;
-
- if (cmd->base.phy_address != phydev->mdio.addr)
- return -EINVAL;
-
- linkmode_copy(advertising, cmd->link_modes.advertising);
-
- /* We make sure that we don't pass unsupported values in to the PHY */
- linkmode_and(advertising, advertising, phydev->supported);
-
- /* Verify the settings we care about. */
- if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
- return -EINVAL;
-
- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
- return -EINVAL;
-
- if (autoneg == AUTONEG_DISABLE &&
- ((speed != SPEED_1000 &&
- speed != SPEED_100 &&
- speed != SPEED_10) ||
- (duplex != DUPLEX_HALF &&
- duplex != DUPLEX_FULL)))
- return -EINVAL;
-
- phydev->autoneg = autoneg;
-
- if (autoneg == AUTONEG_DISABLE) {
- phydev->speed = speed;
- phydev->duplex = duplex;
- }
-
- linkmode_copy(phydev->advertising, advertising);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising, autoneg == AUTONEG_ENABLE);
-
- phydev->master_slave_set = cmd->base.master_slave_cfg;
- phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
-
- /* Restart the PHY */
- phy_start_aneg(phydev);
-
- return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_ksettings_set);
-
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
+ mutex_lock(&phydev->lock);
linkmode_copy(cmd->link_modes.supported, phydev->supported);
linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
@@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
cmd->base.eth_tp_mdix = phydev->mdix;
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
@@ -751,7 +700,7 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
* Description: Sanitizes the settings (if we're not autonegotiating
@@ -759,25 +708,43 @@ static int phy_check_link_status(struct phy_device *phydev)
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-int phy_start_aneg(struct phy_device *phydev)
+static int _phy_start_aneg(struct phy_device *phydev)
{
int err;
+ lockdep_assert_held(&phydev->lock);
+
if (!phydev->drv)
return -EIO;
- mutex_lock(&phydev->lock);
-
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
err = phy_config_aneg(phydev);
if (err < 0)
- goto out_unlock;
+ return err;
if (phy_is_started(phydev))
err = phy_check_link_status(phydev);
-out_unlock:
+
+ return err;
+}
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ mutex_lock(&phydev->lock);
+ err = _phy_start_aneg(phydev);
mutex_unlock(&phydev->lock);
return err;
@@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 autoneg = cmd->base.autoneg;
+ u8 duplex = cmd->base.duplex;
+ u32 speed = cmd->base.speed;
+
+ if (cmd->base.phy_address != phydev->mdio.addr)
+ return -EINVAL;
+
+ linkmode_copy(advertising, cmd->link_modes.advertising);
+
+ /* We make sure that we don't pass unsupported values in to the PHY */
+ linkmode_and(advertising, advertising, phydev->supported);
+
+ /* Verify the settings we care about. */
+ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (duplex != DUPLEX_HALF &&
+ duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+ phydev->autoneg = autoneg;
+
+ if (autoneg == AUTONEG_DISABLE) {
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+ }
+
+ linkmode_copy(phydev->advertising, advertising);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising, autoneg == AUTONEG_ENABLE);
+
+ phydev->master_slave_set = cmd->base.master_slave_cfg;
+ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ /* Restart the PHY */
+ _phy_start_aneg(phydev);
+
+ mutex_unlock(&phydev->lock);
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
/**
* phy_speed_down - set speed to lowest speed supported by both link partners
* @phydev: the phy_device struct
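
The phy.c rework above splits phy_start_aneg() into an unlocked _phy_start_aneg(), which asserts the lock via lockdep, and a public wrapper that takes phydev->lock, so phy_ethtool_ksettings_set() can update state and restart aneg under a single lock acquisition. A generic sketch of that locked-wrapper pattern (names are illustrative):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_dev {
	struct mutex lock;
	int state;
};

/* Caller must hold d->lock; usable from paths already holding it. */
static int __example_configure(struct example_dev *d, int state)
{
	lockdep_assert_held(&d->lock);
	d->state = state;
	return 0;
}

/* Public entry point: takes the lock and calls the unlocked helper. */
static int example_configure(struct example_dev *d, int state)
{
	int err;

	mutex_lock(&d->lock);
	err = __example_configure(d, state);
	mutex_unlock(&d->lock);

	return err;
}
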
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index f87f17503373..b554054a7560 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -117,6 +117,7 @@ config USB_LAN78XX
select PHYLIB
select MICROCHIP_PHY
select FIXED_PHY
+ select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 793f8fbe0069..63cd72c5f580 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -4122,6 +4122,12 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */
+ if (dev->maxpacket == 0) {
+ ret = -ENODEV;
+ goto out4;
+ }
+
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 840c1c2ab16a..a33d7fb82a00 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1788,6 +1788,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ if (dev->maxpacket == 0) {
+ /* that is a broken device */
+ status = -ENODEV;
+ goto out4;
+ }
/* let userspace know we have a random address */
if (ether_addr_equal(net->dev_addr, node_id))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 142f70670f5c..8799854bacb2 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bf2fac913942..662e26117353 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
- nf_reset_ct(skb);
-
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
- nf_reset_ct(skb);
-
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index f6fadcbdd86e..0685c0d2d4ea 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -109,7 +109,7 @@ struct bmi_cmd {
struct {
__le32 addr;
__le32 len;
- u8 payload[0];
+ u8 payload[];
} write_mem;
struct {
__le32 addr;
@@ -138,18 +138,18 @@ struct bmi_cmd {
} rompatch_uninstall;
struct {
__le32 count;
- __le32 patch_ids[0]; /* length of @count */
+ __le32 patch_ids[]; /* length of @count */
} rompatch_activate;
struct {
__le32 count;
- __le32 patch_ids[0]; /* length of @count */
+ __le32 patch_ids[]; /* length of @count */
} rompatch_deactivate;
struct {
__le32 addr;
} lz_start;
struct {
__le32 len; /* max BMI_MAX_DATA_SIZE */
- u8 payload[0]; /* length of @len */
+ u8 payload[]; /* length of @len */
} lz_data;
struct {
u8 name[BMI_NVRAM_SEG_NAME_SZ];
@@ -160,7 +160,7 @@ struct bmi_cmd {
union bmi_resp {
struct {
- u8 payload[0];
+ DECLARE_FLEX_ARRAY(u8, payload);
} read_mem;
struct {
__le32 result;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index ec689e3ce48a..a6de08d3bf4a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1674,8 +1674,11 @@ struct htt_tx_fetch_ind {
__le32 token;
__le16 num_resp_ids;
__le16 num_records;
- __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
- struct htt_tx_fetch_record records[];
+ union {
+ /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+ DECLARE_FLEX_ARRAY(__le32, resp_ids);
+ DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
+ };
} __packed;
static inline void *
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 89c6671b32bc..4a97310f8fee 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -1408,8 +1408,10 @@ struct il3945_tx_cmd {
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
- struct ieee80211_hdr hdr[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 235c7a2e3483..75a4b8e26232 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1251,8 +1251,10 @@ struct iwl_tx_cmd {
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
- struct ieee80211_hdr hdr[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 24e4a82a55da..5fddfd391941 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -239,8 +239,10 @@ struct iwl_tx_cmd {
u8 tid_tspec;
__le16 pm_frame_timeout;
__le16 reserved4;
- u8 payload[0];
- struct ieee80211_hdr hdr[0];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@@ -713,8 +715,10 @@ struct iwl_mvm_compressed_ba_notif {
__le32 tx_rate;
__le16 tfd_cnt;
__le16 ra_tid_cnt;
- struct iwl_mvm_compressed_ba_ratid ra_tid[0];
- struct iwl_mvm_compressed_ba_tfd tfd[];
+ union {
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
+ };
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
/**
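
The wireless header changes above replace adjacent zero-length and flexible arrays with a union of flexible-array members; since a flexible array cannot appear directly inside a union, DECLARE_FLEX_ARRAY() wraps each one so both views start at the same offset. A minimal sketch of that usage with made-up field names:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_tx_cmd {
	__le16 len;
	__le16 flags;
	/* Two views of the same trailing bytes share storage. */
	union {
		DECLARE_FLEX_ARRAY(u8, payload);
		DECLARE_FLEX_ARRAY(__le32, records);
	};
} __packed;
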
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e31b98403f31..fc41ba95f81d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev);
+ netif_device_detach(info->netdev);
+ netif_tx_unlock_bh(info->netdev);
+
xennet_disconnect_backend(info);
return 0;
}
@@ -2349,6 +2353,10 @@ static int xennet_connect(struct net_device *dev)
* domain a kick because we've probably just requeued some
* packets.
*/
+ netif_tx_lock_bh(np->netdev);
+ netif_device_attach(np->netdev);
+ netif_tx_unlock_bh(np->netdev);
+
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 517376c43b86..16ceb763594f 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1006,11 +1006,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)
skb = port100_alloc_skb(dev, 0);
if (!skb)
- return -ENOMEM;
+ return 0;
resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
if (IS_ERR(resp))
- return PTR_ERR(resp);
+ return 0;
if (resp->len < 8)
mask = 0;
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index d16cf3ff644e..b23f47936473 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
&reset_cmd,
ST95HF_RESET_CMD_LEN,
ASYNC);
- if (result) {
+ if (result)
dev_err(&spictx->spidev->dev,
"ST95HF reset failed in remove() err = %d\n", result);
- return result;
- }
/* wait for 3 ms to complete the controller reset process */
usleep_range(3000, 4000);
@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
if (stcontext->st95hf_supply)
regulator_disable(stcontext->st95hf_supply);
- return result;
+ return 0;
}
/* Register as SPI protocol driver */
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 088d3dd6f6fa..b6c6866f9259 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
return err;
}
-static blk_qc_t nd_blk_submit_bio(struct bio *bio)
+static void nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
@@ -173,7 +173,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
+ return;
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
@@ -199,7 +199,6 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 92dec4952297..4295fa809420 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1440,7 +1440,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
return ret;
}
-static blk_qc_t btt_submit_bio(struct bio *bio)
+static void btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_bdev->bd_disk->private_data;
@@ -1451,7 +1451,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
+ return;
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
@@ -1483,7 +1483,6 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7de592d7eff4..6a45fa91e8a3 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ef4950f80832..c74d7bceb222 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -190,7 +190,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
return rc;
}
-static blk_qc_t pmem_submit_bio(struct bio *bio)
+static void pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
@@ -229,7 +229,6 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
@@ -333,26 +332,6 @@ static const struct attribute_group *pmem_attribute_groups[] = {
NULL,
};
-static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
- struct pmem_device *pmem = pgmap->owner;
-
- blk_cleanup_disk(pmem->disk);
-}
-
-static void pmem_release_queue(void *pgmap)
-{
- pmem_pagemap_cleanup(pgmap);
-}
-
-static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
-{
- struct request_queue *q =
- container_of(pgmap->ref, struct request_queue, q_usage_counter);
-
- blk_freeze_queue_start(q);
-}
-
static void pmem_release_disk(void *__pmem)
{
struct pmem_device *pmem = __pmem;
@@ -360,12 +339,9 @@ static void pmem_release_disk(void *__pmem)
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
-}
-static const struct dev_pagemap_ops fsdax_pagemap_ops = {
- .kill = pmem_pagemap_kill,
- .cleanup = pmem_pagemap_cleanup,
-};
+ blk_cleanup_disk(pmem->disk);
+}
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
@@ -427,10 +403,8 @@ static int pmem_attach_disk(struct device *dev,
pmem->disk = disk;
pmem->pgmap.owner = pmem;
pmem->pfn_flags = PFN_DEV;
- pmem->pgmap.ref = &q->q_usage_counter;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
- pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -444,16 +418,12 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
- pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
- if (devm_add_action_or_reset(dev, pmem_release_queue,
- &pmem->pgmap))
- return -ENOMEM;
bb_range.start = res->start;
bb_range.end = res->end;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f8dd664b2eda..838b5e2058be 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -6,6 +6,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -118,25 +119,6 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
struct nvme_command *cmd);
-/*
- * Prepare a queue for teardown.
- *
- * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
- * the capacity to 0 after that to avoid blocking dispatchers that may be
- * holding bd_butex. This will end buffered writers dirtying pages that can't
- * be synced.
- */
-static void nvme_set_queue_dying(struct nvme_ns *ns)
-{
- if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- return;
-
- blk_set_queue_dying(ns->queue);
- blk_mq_unquiesce_queue(ns->queue);
-
- set_capacity_and_notify(ns->disk, 0);
-}
-
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
@@ -221,7 +203,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
- "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
+ "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
@@ -345,15 +327,19 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
return RETRY;
}
-static inline void nvme_end_req(struct request *req)
+static inline void nvme_end_req_zoned(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
+}
+
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -380,6 +366,13 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+void nvme_complete_batch_req(struct request *req)
+{
+ nvme_cleanup_cmd(req);
+ nvme_end_req_zoned(req);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
+
/*
* Called to unwind from ->queue_rq on a failed command submission so that the
* multipathing code gets called to potentially failover to another path.
@@ -631,7 +624,7 @@ static inline void nvme_init_request(struct request *req,
req->cmd_flags |= REQ_FAILFAST_DRIVER;
if (req->mq_hctx->type == HCTX_TYPE_POLL)
- req->cmd_flags |= REQ_HIPRI;
+ req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
@@ -822,6 +815,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -873,6 +867,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_IOERR;
}
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -889,6 +884,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
+
if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
return nvme_setup_discard(ns, req, cmnd);
@@ -922,9 +919,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd->rw.opcode = op;
+ cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.rsvd2 = 0;
+ cmnd->rw.metadata = 0;
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cmnd->rw.reftag = 0;
+ cmnd->rw.apptag = 0;
+ cmnd->rw.appmask = 0;
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
@@ -981,10 +984,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
+ if (!(req->rq_flags & RQF_DONTPREP))
nvme_clear_nvme_request(req);
- memset(cmd, 0, sizeof(*cmd));
- }
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -2600,6 +2601,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2620,6 +2639,7 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
@@ -2690,6 +2710,21 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
+
+ /* Versions prior to 1.4 don't necessarily report a valid type */
+ if (id->cntrltype == NVME_CTRL_DISC ||
+ !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+ subsys->subtype = NVME_NQN_DISC;
+ else
+ subsys->subtype = NVME_NQN_NVME;
+
+ if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+ dev_err(ctrl->device,
+ "Subsystem %s is not a discovery controller",
+ subsys->subnqn);
+ kfree(subsys);
+ return -EINVAL;
+ }
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;
@@ -4473,6 +4508,37 @@ out:
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+static void nvme_start_ns_queue(struct nvme_ns *ns)
+{
+ if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_unquiesce_queue(ns->queue);
+}
+
+static void nvme_stop_ns_queue(struct nvme_ns *ns)
+{
+ if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_quiesce_queue(ns->queue);
+}
+
+/*
+ * Prepare a queue for teardown.
+ *
+ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+ * the capacity to 0 after that to avoid blocking dispatchers that may be
+ * holding bd_mutex. This will end buffered writers dirtying pages that can't
+ * be synced.
+ */
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+
+ blk_set_queue_dying(ns->queue);
+ nvme_start_ns_queue(ns);
+
+ set_capacity_and_notify(ns->disk, 0);
+}
+
/**
* nvme_kill_queues(): Ends all namespace queues
* @ctrl: the dead controller that needs to end
@@ -4488,7 +4554,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
list_for_each_entry(ns, &ctrl->namespaces, list)
nvme_set_queue_dying(ns);
@@ -4551,7 +4617,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_quiesce_queue(ns->queue);
+ nvme_stop_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -4562,11 +4628,25 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_unquiesce_queue(ns->queue);
+ nvme_start_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
+
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
+
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 668c6bb7a567..c5a2b71c5268 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_ERR, NULL }
};
@@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
+ case NVMF_OPT_DISCOVERY:
+ opts->discovery_nqn = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
- NVMF_OPT_DISABLE_SQFLOW |\
+ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a146cb903869..c3203ff1c654 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -67,6 +67,7 @@ enum {
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
+ NVMF_OPT_DISCOVERY = 1 << 22,
};
/**
@@ -178,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->subsys)
+ return ctrl->opts->subsysnqn;
+ return ctrl->subsys->subnqn;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa14ad963d91..71b3108c22f0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,6 +16,7 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -2382,7 +2383,7 @@ nvme_fc_ctrl_free(struct kref *ref)
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -2510,7 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
/*
* clean up the admin queue. Same thing as above.
*/
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
+static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_fc_ctrl *ctrl = set->driver_data;
+ int i;
+
+ for (i = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ if (!map->nr_queues) {
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /* Call LLDD map queue functionality if defined */
+ if (ctrl->lport->ops->map_queues)
+ ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+ map);
+ else
+ blk_mq_map_queues(map);
+ }
+ return 0;
+}
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
+ .map_queues = nvme_fc_map_queues,
};
static int
@@ -3095,7 +3119,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
(ilog2(SZ_4K) - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl);
if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
@@ -3249,7 +3273,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
/* resume the io queues so that things will fast fail */
nvme_start_queues(&ctrl->ctrl);
@@ -3572,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;
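
The new ->map_queues hook added above lets an FC LLDD spread blk-mq hardware queues according to its own IRQ affinity instead of the default round-robin mapping. A minimal, hypothetical LLDD callback is sketched below; struct foo_hba and its pdev member are illustrative assumptions, and the blk-mq-pci.h include added at the top of this file suggests this style of PCI-affinity mapping is the intended use.

struct foo_hba {			/* hypothetical LLDD private data */
	struct pci_dev *pdev;
};

static void foo_lldd_map_queues(struct nvme_fc_local_port *localport,
				struct blk_mq_queue_map *map)
{
	struct foo_hba *hba = localport->private;

	/* spread the hardware queues over the PCI device's IRQ vectors */
	blk_mq_pci_map_queues(map, hba->pdev, 0);
}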
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fba06618c6c2..7f2071f2460c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -85,8 +85,13 @@ void nvme_failover_req(struct request *req)
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
- for (bio = req->bio; bio; bio = bio->bi_next)
+ for (bio = req->bio; bio; bio = bio->bi_next) {
bio_set_dev(bio, ns->head->disk->part0);
+ if (bio->bi_opf & REQ_POLLED) {
+ bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_cookie = BLK_QC_T_NONE;
+ }
+ }
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
@@ -100,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->head->disk)
- kblockd_schedule_work(&ns->head->requeue_work);
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@@ -138,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (nvme_mpath_clear_current_path(ns))
- kblockd_schedule_work(&ns->head->requeue_work);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
up_read(&ctrl->namespaces_rwsem);
- mutex_unlock(&ctrl->scan_lock);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -312,12 +319,11 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
-static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
+static void nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
/*
@@ -334,7 +340,7 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
+ submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@@ -349,7 +355,6 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
}
srcu_read_unlock(&head->srcu, srcu_idx);
- return ret;
}
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
@@ -479,6 +484,15 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+ /*
+ * This assumes all controllers that refer to a namespace either
+ * support poll queues or not. That is not a strict guarantee,
+ * but if the assumption is wrong the effect is only suboptimal
+ * performance, not a correctness problem.
+ */
+ if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
+ ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(head->disk->queue, 512);
@@ -494,13 +508,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ int rc;
if (!head->disk)
return;
+ /*
+ * test_and_set_bit() is used because it is protecting against two nvme
+ * paths simultaneously calling device_add_disk() on the same namespace
+ * head.
+ */
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
- device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
nvme_add_ns_head_cdev(head);
}
@@ -538,7 +562,7 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return -EINVAL;
nr_nsids = le32_to_cpu(desc->nnsids);
- nsid_buf_size = nr_nsids * sizeof(__le32);
+ nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ed79a6c7e804..b334af8aa264 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -342,6 +342,7 @@ struct nvme_ctrl {
int nr_reconnects;
unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED 0
+#define NVME_CTRL_ADMIN_Q_STOPPED 1
struct nvmf_ctrl_options *opts;
struct page *discard_page;
@@ -372,6 +373,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
+ enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
@@ -463,6 +465,7 @@ struct nvme_ns {
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
#define NVME_NS_READY 4
+#define NVME_NS_STOPPED 5
struct cdev cdev;
struct device cdev_device;
@@ -638,6 +641,20 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}
void nvme_complete_rq(struct request *req);
+void nvme_complete_batch_req(struct request *req);
+
+static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
+ void (*fn)(struct request *rq))
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ fn(req);
+ nvme_complete_batch_req(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
@@ -665,6 +682,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
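
The nvme_complete_batch() helper declared in the hunk above is meant to be paired with a transport's per-request teardown: the transport passes its teardown function in, and the helper runs it for every request on the batch list before ending them all with blk_mq_end_request_batch(). The PCIe driver does exactly this with nvme_pci_unmap_rq() in the pci.c hunks below; a hypothetical transport following the same shape (names here are illustrative only) would look like:

/* hypothetical per-request teardown for an imaginary transport */
static void nvme_foo_unmap_rq(struct request *req)
{
	/* release transport-specific DMA mappings, SGLs, etc. for @req */
}

/* batch-completion callback passed to blk_mq_add_to_batch() */
static void nvme_foo_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, nvme_foo_unmap_rq);
}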
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 149ecf73df38..ca2ee806d74b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -10,6 +10,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
+#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -244,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
unsigned int mem_size = nvme_dbbuf_size(dev);
- if (dev->dbbuf_dbs)
+ if (dev->dbbuf_dbs) {
+ /*
+ * Clear the dbbuf memory so the driver doesn't observe stale
+ * values from the previous instantiation.
+ */
+ memset(dev->dbbuf_dbs, 0, mem_size);
+ memset(dev->dbbuf_eis, 0, mem_size);
return 0;
+ }
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,
@@ -958,7 +966,7 @@ out_free_cmd:
return ret;
}
-static void nvme_pci_complete_rq(struct request *req)
+static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
@@ -968,9 +976,19 @@ static void nvme_pci_complete_rq(struct request *req)
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
if (blk_rq_nr_phys_segments(req))
nvme_unmap_data(dev, req);
+}
+
+static void nvme_pci_complete_rq(struct request *req)
+{
+ nvme_pci_unmap_rq(req);
nvme_complete_rq(req);
}
+static void nvme_pci_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, nvme_pci_unmap_rq);
+}
+
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
@@ -995,7 +1013,8 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}
-static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
__u16 command_id = READ_ONCE(cqe->command_id);
@@ -1022,7 +1041,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
}
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- if (!nvme_try_complete_req(req, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1038,7 +1059,8 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_process_cq(struct nvme_queue *nvmeq)
+static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
int found = 0;
@@ -1049,7 +1071,7 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
* the cqe requires a full read memory barrier
*/
dma_rmb();
- nvme_handle_cqe(nvmeq, nvmeq->cq_head);
+ nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
@@ -1061,9 +1083,13 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
+ DEFINE_IO_COMP_BATCH(iob);
- if (nvme_process_cq(nvmeq))
+ if (nvme_poll_cq(nvmeq, &iob)) {
+ if (!rq_list_empty(iob.req_list))
+ nvme_pci_complete_batch(&iob);
return IRQ_HANDLED;
+ }
return IRQ_NONE;
}
@@ -1087,11 +1113,11 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- nvme_process_cq(nvmeq);
+ nvme_poll_cq(nvmeq, NULL);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
-static int nvme_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = hctx->driver_data;
bool found;
@@ -1100,7 +1126,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
return 0;
spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq);
+ found = nvme_poll_cq(nvmeq, iob);
spin_unlock(&nvmeq->cq_poll_lock);
return found;
@@ -1273,7 +1299,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* Did we miss an interrupt?
*/
if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
- nvme_poll(req->mq_hctx);
+ nvme_poll(req->mq_hctx, NULL);
else
nvme_poll_irqdisable(nvmeq);
@@ -1395,7 +1421,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
- blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
+ nvme_stop_admin_queue(&nvmeq->dev->ctrl);
if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
@@ -1433,7 +1459,7 @@ static void nvme_reap_pending_cqes(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
spin_lock(&dev->queues[i].cq_poll_lock);
- nvme_process_cq(&dev->queues[i]);
+ nvme_poll_cq(&dev->queues[i], NULL);
spin_unlock(&dev->queues[i].cq_poll_lock);
}
}
@@ -1654,7 +1680,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* user requests may be waiting on a stopped queue. Start the
* queue to flush these to completion.
*/
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
@@ -1688,7 +1714,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return -ENODEV;
}
} else
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
return 0;
}
@@ -2623,7 +2649,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (shutdown) {
nvme_start_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
}
mutex_unlock(&dev->shutdown_lock);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 042c594bc57e..850f84d204d0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
+#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -918,7 +919,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
else
ctrl->ctrl.max_integrity_segments = 0;
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
@@ -927,7 +928,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return 0;
out_quiesce_queue:
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
@@ -1025,12 +1026,12 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove)
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, remove);
}
@@ -1095,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1111,6 +1114,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
+ if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ dev_warn(ctrl->ctrl.device,
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ }
+
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@@ -1153,7 +1163,7 @@ destroy_io:
nvme_rdma_destroy_io_queues(ctrl, new);
}
destroy_admin:
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -1193,7 +1203,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2105,7 +2115,7 @@ unmap_qe:
return ret;
}
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_rdma_queue *queue = hctx->driver_data;
@@ -2231,7 +2241,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&ctrl->reconnect_work);
nvme_rdma_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
@@ -2385,7 +2395,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3c1c29dd3020..33bc83d8d992 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ int req_data_len = req->data_len;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
size_t offset = nvme_tcp_req_cur_offset(req);
size_t len = nvme_tcp_req_cur_length(req);
bool last = nvme_tcp_pdu_last_send(req, len);
+ int req_data_sent = req->data_sent;
int ret, flags = MSG_DONTWAIT;
if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
* in the request where we don't want to modify it as we may
* compete with the RX path completing the request.
*/
- if (req->data_sent + ret < req->data_len)
+ if (req_data_sent + ret < req_data_len)
nvme_tcp_advance_req(req, ret);
/* fully successful last send in current PDU */
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ size_t offset = req->offset;
int ret;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &req->ddgst + req->offset,
+ .iov_base = (u8 *)&req->ddgst + req->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
};
@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
if (unlikely(ret <= 0))
return ret;
- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1915,7 +1918,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_stop_queue;
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
error = nvme_init_ctrl_finish(ctrl);
if (error)
@@ -1924,7 +1927,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return 0;
out_quiesce_queue:
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
@@ -1946,12 +1949,12 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
if (remove)
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1960,7 +1963,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
{
if (ctrl->queue_count <= 1)
return;
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
@@ -2055,7 +2058,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
@@ -2098,7 +2101,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2116,7 +2119,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
nvme_tcp_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
if (shutdown)
nvme_shutdown_ctrl(ctrl);
else
@@ -2429,7 +2432,7 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
return 0;
}
-static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
@@ -2582,7 +2585,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index d95010481fce..bfc259e0d7b8 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -233,6 +233,8 @@ out_free:
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
+ memset(c, 0, sizeof(*c));
+
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..6fb24746de06 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -264,7 +264,7 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
desc->state = req->port->ana_state[grpid];
memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
- return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+ return struct_size(desc, nsids, count);
}
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
@@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
- desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
- NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+ GFP_KERNEL);
if (!desc)
goto out;
@@ -374,13 +374,19 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->rab = 6;
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ id->cntrltype = NVME_CTRL_DISC;
+ else
+ id->cntrltype = NVME_CTRL_IO;
+
/*
* XXX: figure out how we can assign a IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
/* we support multiple ports, multiple hosts and ANA: */
- id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+ NVME_CTRL_CMIC_ANA;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
@@ -536,7 +542,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
- id->nmic = (1 << 0);
+ id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
@@ -1008,7 +1014,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index be5d82421e3a..091a0ca16361 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ char *subsysnqn;
+ int len;
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!subsysnqn)
+ return -ENOMEM;
+
+ /*
+ * The discovery NQN must be different from the subsystem NQN.
+ */
+ if (!strcmp(subsysnqn, subsys->subsysnqn)) {
+ kfree(subsysnqn);
+ return -EBUSY;
+ }
+ down_write(&nvmet_config_sem);
+ kfree(nvmet_disc_subsys->subsysnqn);
+ nvmet_disc_subsys->subsysnqn = subsysnqn;
+ up_write(&nvmet_config_sem);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
@@ -1553,6 +1592,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ /* Let in-flight controller teardown complete */
+ flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b8425fa34300..5119c687de68 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* should verify iosqes,iocqes are zeroed, however that
* would break backwards compatibility, so don't enforce it.
*/
- if (ctrl->subsys->type != NVME_NQN_DISC &&
+ if (!nvmet_is_disc_subsys(ctrl->subsys) &&
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
ctrl->csts = NVME_CSTS_CFS;
@@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ if (ctrl->ops->get_max_queue_size)
+ ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ else
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@@ -1278,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
- if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+ if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_init(&ctrl->lock);
ctrl->port = req->port;
+ ctrl->ops = req->ops;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1405,13 +1409,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
- ctrl->ops = req->ops;
-
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
*/
- if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@@ -1491,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
+ !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
@@ -1538,6 +1541,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->max_qid = NVMET_NR_QUEUES;
break;
case NVME_NQN_DISC:
+ case NVME_NQN_CURR:
subsys->max_qid = 0;
break;
default:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 7aa62bc6ae84..c2162eef8ce1 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -146,7 +146,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
- size_t entries = 0;
+ size_t entries = 1;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
@@ -171,6 +171,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u32 numrec = 0;
u16 status = 0;
void *buffer;
+ char traddr[NVMF_TRADDR_SIZE];
if (!nvmet_check_transfer_len(req, data_len))
return;
@@ -203,15 +204,19 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
-
hdr = buffer;
- list_for_each_entry(p, &req->port->subsystems, entry) {
- char traddr[NVMF_TRADDR_SIZE];
+ nvmet_set_disc_traddr(req, req->port, traddr);
+
+ nvmet_format_discovery_entry(hdr, req->port,
+ nvmet_disc_subsys->subsysnqn,
+ traddr, NVME_NQN_CURR, numrec);
+ numrec++;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
p->subsys->subsysnqn, traddr,
NVME_NQN_NVME, numrec);
@@ -268,6 +273,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+ id->cntrltype = NVME_CTRL_DISC;
+
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
@@ -387,7 +394,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
- nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0454cee920..70fb587e9413 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 0fc2781ab970..70ca9dfc1771 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/module.h>
#include "nvmet.h"
@@ -86,7 +87,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
ns->bdev = NULL;
return ret;
}
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ns->pi_type = 0;
@@ -107,7 +108,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
}
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 1dd1a0fe2e81..6aa30f30b572 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -125,7 +125,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
return call_iter(iocb, &iter);
}
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
u16 status = NVME_SC_SUCCESS;
@@ -222,7 +222,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
complete:
- nvmet_file_io_done(&req->f.iocb, ret, 0);
+ nvmet_file_io_done(&req->f.iocb, ret);
return true;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0285ccc7541f..eb1094254c82 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -384,6 +384,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = PTR_ERR(ctrl->ctrl.admin_q);
goto out_cleanup_fabrics_q;
}
+ /* reset stopped state for the fresh admin queue */
+ clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
@@ -398,7 +400,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
@@ -428,7 +430,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_loop_destroy_io_queues(ctrl);
}
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..af193423c10b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+ u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -576,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
return req->sq->ctrl->subsys;
}
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->type != NVME_NQN_NVME;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 891174ccd44b..1deb4043e242 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
+#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -1818,12 +1819,36 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
+{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
+
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
if (cm_id)
rdma_destroy_id(cm_id);
+
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet. Doing this here, after the RDMA-CM ID has been
+ * destroyed, guarantees that no new queue will be created.
+ */
+ nvmet_rdma_destroy_port_queues(port);
}
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
@@ -1975,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
return NVMET_RDMA_MAX_MDTS;
}
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+ return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1986,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
+ .get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 07ee347ea3f3..84c387e4bf43 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
struct nvmet_tcp_queue *queue = cmd->queue;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &cmd->exp_ddgst + cmd->offset,
+ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
};
int ret;
@@ -1096,7 +1096,7 @@ recv:
}
if (queue->hdr_digest &&
- nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
+ struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_crypto(queue);
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
kfree(queue);
}
@@ -1737,6 +1740,17 @@ err_port:
return ret;
}
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1746,6 +1760,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59c1390cdf42..9da8835ba5a5 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/kmemleak.h>
#include "of_private.h"
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_free(base, size);
+ kmemleak_ignore_phys(base);
}
return err;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index a6fbc709913e..87a33ecc2cf1 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -356,6 +356,7 @@ static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
case 0:
case 2:
status |= SS_3VCARD; /* 3V card */
+ break;
case 3:
break; /* 5V card: set nothing */
default:
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 77522e5efe11..4374af292e6d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -36,7 +36,7 @@ config ARM_CCI5xx_PMU
config ARM_CCN
tristate "ARM CCN driver support"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
help
PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
interconnect.
@@ -62,7 +62,8 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on ARM64 && ACPI
+ depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ depends on GENERIC_MSI_IRQ_DOMAIN
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
@@ -80,7 +81,7 @@ config ARM_DSU_PMU
config FSL_IMX8_DDR_PMU
tristate "Freescale i.MX8 DDR perf monitor"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Provides support for the DDR performance monitor in i.MX8, which
can give information about memory throughput and other related
@@ -108,7 +109,8 @@ config QCOM_L3_PMU
config THUNDERX2_PMU
tristate "Cavium ThunderX2 SoC PMU UNCORE"
- depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ depends on NUMA && ACPI
default m
help
Provides support for ThunderX2 UNCORE events.
@@ -116,7 +118,7 @@ config THUNDERX2_PMU
in the DDR4 Memory Controller (DMC).
config XGENE_PMU
- depends on ARCH_XGENE
+ depends on ARCH_XGENE || (COMPILE_TEST && 64BIT)
bool "APM X-Gene SoC PMU"
default n
help
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 83264ec0a957..bad99d149172 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -27,7 +27,7 @@
#define PA_INT_CLEAR 0x1c7c
#define PA_EVENT_TYPE0 0x1c80
#define PA_PMU_VERSION 0x1cf0
-#define PA_EVENT_CNT0_L 0x1f00
+#define PA_EVENT_CNT0_L 0x1d00
#define PA_EVTYPE_MASK 0xff
#define PA_NR_COUNTERS 0x8
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index fc1a376ee906..05378c0fd8f3 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -487,7 +487,7 @@ static void tx2_uncore_event_update(struct perf_event *event)
new = reg_readl(hwc->event_base);
prev = local64_xchg(&hwc->prev_count, new);
/* handles rollover of 32 bit counter */
- delta = (u32)(((1UL << 32) - prev) + new);
+ delta = (u32)(((1ULL << 32) - prev) + new);
}
/* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index e79690bd8b85..d7f8175d2c1c 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -5,7 +5,6 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -13,7 +12,6 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#define FLAG_BCM4708 BIT(1)
@@ -24,8 +22,7 @@ struct ns_pinctrl {
struct device *dev;
unsigned int chipset_flag;
struct pinctrl_dev *pctldev;
- struct regmap *regmap;
- u32 offset;
+ void __iomem *base;
struct pinctrl_desc pctldesc;
struct ns_pinctrl_group *groups;
@@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
unset |= BIT(pin_number);
}
- regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
+ tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
- regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
+ writel(tmp, ns_pinctrl->base);
return 0;
}
@@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
static int ns_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
const struct of_device_id *of_id;
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
struct ns_pinctrl_group *group;
struct ns_pinctrl_function *function;
+ struct resource *res;
int i;
ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
@@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
- ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(ns_pinctrl->regmap)) {
- int err = PTR_ERR(ns_pinctrl->regmap);
-
- dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
-
- return err;
- }
-
- if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
- dev_err(dev, "Failed to get register offset\n");
- return -ENOENT;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cru_gpio_control");
+ ns_pinctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ns_pinctrl->base)) {
+ dev_err(dev, "Failed to map pinctrl regs\n");
+ return PTR_ERR(ns_pinctrl->base);
}
memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 8d0f88e9ca88..bae9d429b813 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -840,6 +840,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
+ u32 pin_reg, mask;
+ int i;
+
+ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ BIT(WAKE_CNTRL_OFF_S4);
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+ if (!pd)
+ continue;
+
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ pin_reg = readl(gpio_dev->base + i * 4);
+ pin_reg &= ~mask;
+ writel(pin_reg, gpio_dev->base + i * 4);
+
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+}
+
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -976,6 +1004,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
+ /* Disable and mask interrupts */
+ amd_gpio_irq_init(gpio_dev);
+
girq = &gpio_dev->gc.irq;
girq->chip = &amd_gpio_irqchip;
/* This will let us handle the parent IRQ in the driver */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 68b3886f9f0f..dfd8888a222a 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups;
int i;
- for (i = g->pin; i < g->pin + pctl->ngroups; i++)
- stm32_pinctrl_restore_gpio_regs(pctl, i);
+ for (i = 0; i < pctl->ngroups; i++, g++)
+ stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 4dfc52e06704..f9b2d66b0443 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
ptp_cleanup_pin_groups(ptp);
+ kfree(ptp->vclock_index);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Create a posix clock and link it to the device. */
err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
+ if (ptp->pps_source)
+ pps_unregister_source(ptp->pps_source);
+
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+
+ put_device(&ptp->dev);
+
pr_err("failed to create posix clock\n");
- goto no_clock;
+ return ERR_PTR(err);
}
return ptp;
-no_clock:
- if (ptp->pps_source)
- pps_unregister_source(ptp->pps_source);
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
- kfree(ptp->vclock_index);
-
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index d0096cd7096a..4991054a2135 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ if (ret == -KVM_ENOSYS)
return -ENODEV;
- return 0;
+ return ret;
}
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f6ca2098cc01..86aa4141efa9 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -145,7 +145,7 @@ static inline int regulator_lock_nested(struct regulator_dev *rdev,
mutex_lock(&regulator_nesting_mutex);
- if (ww_ctx || !ww_mutex_trylock(&rdev->mutex)) {
+ if (!ww_mutex_trylock(&rdev->mutex, ww_ctx)) {
if (rdev->mutex_owner == current)
rdev->ref_cnt++;
else
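The regulator change adapts to a ww_mutex_trylock() that now takes the acquire context directly, so the trylock fast path no longer has to be bypassed whenever a context is supplied. As a rough illustration of the surrounding logic (try the lock; if that fails and the caller already owns it, count a nested acquisition), here is a pthread-based analogue; it is not the kernel ww_mutex API, the names are invented, and owner tracking is simplified. Compile with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    struct rlock {
        pthread_mutex_t mutex;
        pthread_t owner;
        int ref_cnt;
    };

    /* try the lock; on contention, treat the current owner as nesting */
    static void rlock_lock(struct rlock *l)
    {
        if (pthread_mutex_trylock(&l->mutex) != 0) {
            if (pthread_equal(l->owner, pthread_self())) {
                l->ref_cnt++;               /* nested acquisition by the owner */
                return;
            }
            pthread_mutex_lock(&l->mutex);  /* really contended: block */
        }
        l->owner = pthread_self();
        l->ref_cnt = 1;
    }

    int main(void)
    {
        struct rlock l = { .mutex = PTHREAD_MUTEX_INITIALIZER };

        rlock_lock(&l);
        rlock_lock(&l);                      /* same thread: nests */
        printf("ref_cnt = %d\n", l.ref_cnt); /* prints 2 */
        return 0;
    }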
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index be799a5abf8a..b0056ae5d463 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -147,8 +147,8 @@ config RESET_OXNAS
bool
config RESET_PISTACHIO
- bool "Pistachio Reset Driver" if COMPILE_TEST
- default MACH_PISTACHIO
+ bool "Pistachio Reset Driver"
+ depends on MIPS || COMPILE_TEST
help
This enables the reset driver for ImgTec Pistachio SoCs.
diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
index b6f074d6a65f..433fa0c40e47 100644
--- a/drivers/reset/reset-brcmstb-rescal.c
+++ b/drivers/reset/reset-brcmstb-rescal.c
@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
}
ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
- !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+ (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
if (ret) {
dev_err(data->dev, "time out on SATA/PCIe rescal\n");
return ret;
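The one-character rescal fix flips the poll condition: readl_poll_timeout() returns 0 as soon as its condition evaluates true and an error code on timeout, so the driver must wait for the status bit to become set, not cleared. A small user-space imitation of that contract, with a fake register that becomes ready after a few polls (all names invented):

    #include <errno.h>
    #include <stdio.h>

    #define STATUS_BIT 0x1u

    static unsigned int fake_reg;

    static unsigned int read_reg(void)
    {
        static int polls;

        if (++polls >= 3)
            fake_reg |= STATUS_BIT;     /* the "hardware" finishes */
        return fake_reg;
    }

    /* poll until cond(reg) is true; 0 on success, -ETIMEDOUT otherwise */
    static int poll_timeout(int (*cond)(unsigned int), int max_polls)
    {
        for (int i = 0; i < max_polls; i++)
            if (cond(read_reg()))
                return 0;
        return -ETIMEDOUT;
    }

    static int bit_set(unsigned int reg)
    {
        return reg & STATUS_BIT;        /* wait for the bit to be SET */
    }

    int main(void)
    {
        printf("result = %d\n", poll_timeout(bit_set, 10)); /* 0: success */
        return 0;
    }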
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 2a72f861f798..8c6492e5693c 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -92,3 +92,29 @@ void __init socfpga_reset_init(void)
for_each_matching_node(np, socfpga_early_reset_dt_ids)
a10_reset_init(np);
}
+
+/*
+ * The early driver is problematic, because it doesn't register
+ * itself as a driver. This causes certain device links to prevent
+ * consumer devices from probing. The hacky solution is to register
+ * an empty driver, whose only job is to attach itself to the reset
+ * manager and call probe.
+ */
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver reset_socfpga_driver = {
+ .probe = reset_simple_probe,
+ .driver = {
+ .name = "socfpga-reset",
+ .of_match_table = socfpga_reset_dt_ids,
+ },
+};
+builtin_platform_driver(reset_socfpga_driver);
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
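The BPMP reset change checks two independent failure signals: the transport return value of tegra_bpmp_transfer() and the firmware status carried back in msg.rx.ret. A generic sketch of that two-level error check for an RPC-style call, with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    struct reply { int ret; };          /* firmware status, 0 on success */

    /* pretend transport: delivery succeeds, firmware reports an error */
    static int transfer(struct reply *r)
    {
        r->ret = -5;
        return 0;
    }

    static int do_reset(void)
    {
        struct reply r;
        int err = transfer(&r);

        if (err)        /* the message never reached the firmware */
            return err;
        if (r.ret)      /* the firmware rejected the request */
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("do_reset() = %d\n", do_reset());    /* -EINVAL in this demo */
        return 0;
    }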
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e34c6cc61983..8e87a31e329d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2077,12 +2077,15 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
return;
+
+ dasd_path_clear_all_verify(device);
+ dasd_path_clear_all_fcsec(device);
+
rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
if (rc) {
+ dasd_path_add_tbvpm(device, tbvpm);
+ dasd_path_add_fcsecpm(device, fcsecpm);
dasd_device_set_timer(device, 50);
- } else {
- dasd_path_clear_all_verify(device);
- dasd_path_clear_all_fcsec(device);
}
};
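The reordering in __dasd_device_check_path_events() claims the pending path masks before calling the handler and only puts them back if the handler fails, so events that arrive while the handler is running are no longer wiped out afterwards. A minimal sketch of that claim-then-restore pattern on a plain bitmask (names invented):

    #include <stdio.h>

    static unsigned int pending;        /* bitmask of paths to verify */

    static int handle(unsigned int work)
    {
        (void)work;
        return -1;                      /* pretend the handler failed */
    }

    static void check_events(void)
    {
        unsigned int work = pending;

        pending = 0;                    /* claim the work before handling */
        if (handle(work))
            pending |= work;            /* failed: hand the work back */
    }

    int main(void)
    {
        pending = 0x5;
        check_events();
        printf("pending after failed handling = 0x%x\n", pending); /* 0x5 */
        return 0;
    }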
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4691a3c35d72..299001ad9a32 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -201,7 +201,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
struct ccw1 *ccw;
struct dasd_ccw_req *dctl_cqr;
- dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+ dctl_cqr = dasd_alloc_erp_request(erp->magic, 1,
sizeof(struct DCTL_data),
device);
if (IS_ERR(dctl_cqr)) {
@@ -1652,7 +1652,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
}
/* Build new ERP request including DE/LO */
- erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ erp = dasd_alloc_erp_request(cqr->magic,
2 + 1,/* DE/LO + TIC */
sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data), device);
@@ -2388,7 +2388,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
}
/* allocate additional request block */
- erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ erp = dasd_alloc_erp_request(cqr->magic,
cplength, datasize, device);
if (IS_ERR(erp)) {
if (cqr->retries <= 0) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 460e0f1cca53..8410a25a65c1 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -560,8 +560,8 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
return -EINVAL;
}
pfxdata->format = format;
- pfxdata->base_address = basepriv->ned->unit_addr;
- pfxdata->base_lss = basepriv->ned->ID;
+ pfxdata->base_address = basepriv->conf.ned->unit_addr;
+ pfxdata->base_lss = basepriv->conf.ned->ID;
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -736,32 +736,30 @@ dasd_eckd_cdl_reclen(int recid)
return LABEL_SIZE;
}
/* create unique id from private structure. */
-static void create_uid(struct dasd_eckd_private *private)
+static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
int count;
- struct dasd_uid *uid;
- uid = &private->uid;
memset(uid, 0, sizeof(struct dasd_uid));
- memcpy(uid->vendor, private->ned->HDA_manufacturer,
+ memcpy(uid->vendor, conf->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
- memcpy(uid->serial, &private->ned->serial,
+ memcpy(uid->serial, &conf->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
- uid->ssid = private->gneq->subsystemID;
- uid->real_unit_addr = private->ned->unit_addr;
- if (private->sneq) {
- uid->type = private->sneq->sua_flags;
+ uid->ssid = conf->gneq->subsystemID;
+ uid->real_unit_addr = conf->ned->unit_addr;
+ if (conf->sneq) {
+ uid->type = conf->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
- uid->base_unit_addr = private->sneq->base_unit_addr;
+ uid->base_unit_addr = conf->sneq->base_unit_addr;
} else {
uid->type = UA_BASE_DEVICE;
}
- if (private->vdsneq) {
+ if (conf->vdsneq) {
for (count = 0; count < 16; count++) {
sprintf(uid->vduit+2*count, "%02x",
- private->vdsneq->uit[count]);
+ conf->vdsneq->uit[count]);
}
}
}
@@ -776,10 +774,10 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
if (!private)
return -ENODEV;
- if (!private->ned || !private->gneq)
+ if (!private->conf.ned || !private->conf.gneq)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- create_uid(private);
+ create_uid(&private->conf, &private->uid);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -803,14 +801,15 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
* return 0 for match
*/
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
- struct dasd_eckd_private *private)
+ struct dasd_conf *path_conf)
{
struct dasd_uid device_uid;
+ struct dasd_uid path_uid;
- create_uid(private);
+ create_uid(path_conf, &path_uid);
dasd_eckd_get_uid(device, &device_uid);
- return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+ return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
@@ -946,34 +945,34 @@ out_error:
return ret;
}
-static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
+static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
struct dasd_sneq *sneq;
int i, count;
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
- count = private->conf_len / sizeof(struct dasd_sneq);
- sneq = (struct dasd_sneq *)private->conf_data;
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
+ count = conf->len / sizeof(struct dasd_sneq);
+ sneq = (struct dasd_sneq *)conf->data;
for (i = 0; i < count; ++i) {
if (sneq->flags.identifier == 1 && sneq->format == 1)
- private->sneq = sneq;
+ conf->sneq = sneq;
else if (sneq->flags.identifier == 1 && sneq->format == 4)
- private->vdsneq = (struct vd_sneq *)sneq;
+ conf->vdsneq = (struct vd_sneq *)sneq;
else if (sneq->flags.identifier == 2)
- private->gneq = (struct dasd_gneq *)sneq;
+ conf->gneq = (struct dasd_gneq *)sneq;
else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
- private->ned = (struct dasd_ned *)sneq;
+ conf->ned = (struct dasd_ned *)sneq;
sneq++;
}
- if (!private->ned || !private->gneq) {
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
+ if (!conf->ned || !conf->gneq) {
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
return -EINVAL;
}
return 0;
@@ -1016,9 +1015,9 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
* with the new one if this points to the same data
*/
cdp = device->path[chp].conf_data;
- if (private->conf_data == cdp) {
- private->conf_data = (void *)conf_data;
- dasd_eckd_identify_conf_parts(private);
+ if (private->conf.data == cdp) {
+ private->conf.data = (void *)conf_data;
+ dasd_eckd_identify_conf_parts(&private->conf);
}
ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
@@ -1036,8 +1035,8 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
- private->conf_data = NULL;
- private->conf_len = 0;
+ private->conf.data = NULL;
+ private->conf.len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
device->path[i].conf_data = NULL;
@@ -1071,15 +1070,55 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ char *print_uid)
+{
+ struct dasd_uid uid;
+
+ create_uid(conf, &uid);
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, 60, /* callers pass char[60] buffers */
+ "%s.%s.%04x.%02x.%s",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, 60,
+ "%s.%s.%04x.%02x",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr);
+}
+
+static int dasd_eckd_check_cabling(struct dasd_device *device,
+ void *conf_data, __u8 lpm)
+{
+ struct dasd_eckd_private *private = device->private;
+ char print_path_uid[60], print_device_uid[60];
+ struct dasd_conf path_conf;
+
+ path_conf.data = conf_data;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf))
+ return 1;
+
+ if (dasd_eckd_compare_path_uid(device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_path_uid);
+ dasd_eckd_get_uid_string(&private->conf, print_device_uid);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
+ lpm, print_path_uid, print_device_uid);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc, path_err, pos;
__u8 lpm, opm;
- struct dasd_eckd_private *private, path_private;
- struct dasd_uid *uid;
- char print_path_uid[60], print_device_uid[60];
+ struct dasd_eckd_private *private;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
@@ -1109,11 +1148,11 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
if (!conf_data_saved) {
/* initially clear previously stored conf_data */
dasd_eckd_clear_conf_data(device);
- private->conf_data = conf_data;
- private->conf_len = conf_len;
- if (dasd_eckd_identify_conf_parts(private)) {
- private->conf_data = NULL;
- private->conf_len = 0;
+ private->conf.data = conf_data;
+ private->conf.len = conf_len;
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
+ private->conf.data = NULL;
+ private->conf.len = 0;
kfree(conf_data);
continue;
}
@@ -1123,59 +1162,11 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
*/
dasd_eckd_generate_uid(device);
conf_data_saved++;
- } else {
- path_private.conf_data = conf_data;
- path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
- if (dasd_eckd_identify_conf_parts(
- &path_private)) {
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
- kfree(conf_data);
- continue;
- }
- if (dasd_eckd_compare_path_uid(
- device, &path_private)) {
- uid = &path_private.uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_path_uid,
- sizeof(print_path_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_path_uid,
- sizeof(print_path_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
- uid = &private->uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_device_uid,
- sizeof(print_device_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_device_uid,
- sizeof(print_device_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
- dev_err(&device->cdev->dev,
- "Not all channel paths lead to "
- "the same device, path %02X leads to "
- "device %s instead of %s\n", lpm,
- print_path_uid, print_device_uid);
- path_err = -EINVAL;
- dasd_path_add_cablepm(device, lpm);
- continue;
- }
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
+ } else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
+ dasd_path_add_cablepm(device, lpm);
+ path_err = -EINVAL;
+ kfree(conf_data);
+ continue;
}
pos = pathmask_to_pos(lpm);
@@ -1197,8 +1188,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
}
- dasd_eckd_read_fc_security(device);
-
return path_err;
}
@@ -1213,7 +1202,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
/* is transport mode supported? */
fcx_in_css = css_general_characteristics.fcx;
- fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+ fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
@@ -1282,9 +1271,9 @@ static int rebuild_device_uid(struct dasd_device *device,
"returned error %d", rc);
break;
}
- memcpy(private->conf_data, data->rcd_buffer,
+ memcpy(private->conf.data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
- if (dasd_eckd_identify_conf_parts(private)) {
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
rc = -ENODEV;
} else /* first valid path is enough */
break;
@@ -1299,11 +1288,10 @@ static int rebuild_device_uid(struct dasd_device *device,
static void dasd_eckd_path_available_action(struct dasd_device *device,
struct pe_handler_work_data *data)
{
- struct dasd_eckd_private path_private;
- struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
+ struct dasd_conf path_conf;
unsigned long flags;
char print_uid[60];
int rc, pos;
@@ -1367,11 +1355,11 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
*/
memcpy(&path_rcd_buf, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
- path_private.conf_data = (void *) &path_rcd_buf;
- path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
- if (dasd_eckd_identify_conf_parts(&path_private)) {
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
+ path_conf.data = (void *)&path_rcd_buf;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf)) {
+ path_conf.data = NULL;
+ path_conf.len = 0;
continue;
}
@@ -1382,7 +1370,7 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
* the first working path UID will be used as device UID
*/
if (dasd_path_get_opm(device) &&
- dasd_eckd_compare_path_uid(device, &path_private)) {
+ dasd_eckd_compare_path_uid(device, &path_conf)) {
/*
* the comparison was not successful
* rebuild the device UID with at least one
@@ -1396,20 +1384,8 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
*/
if (rebuild_device_uid(device, data) ||
dasd_eckd_compare_path_uid(
- device, &path_private)) {
- uid = &path_private.uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
+ device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_uid);
dev_err(&device->cdev->dev,
"The newly added channel path %02X "
"will not be used because it leads "
@@ -1427,6 +1403,14 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
if (conf_data) {
memcpy(conf_data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
+ } else {
+ /*
+ * the path is operational, but its config data could not
+ * be stored due to a low-memory condition; add it to the
+ * error path mask and schedule a path verification later
+ * so that the data can be stored once memory is available
+ */
+ epm |= lpm;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
@@ -1447,7 +1431,10 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
}
dasd_path_add_nppm(device, npm);
dasd_path_add_ppm(device, ppm);
- dasd_path_add_tbvpm(device, epm);
+ if (epm) {
+ dasd_path_add_tbvpm(device, epm);
+ dasd_device_set_timer(device, 50);
+ }
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -1625,8 +1612,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
prssdp = cqr->data;
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
- prssdp->lss = private->ned->ID;
- prssdp->volume = private->ned->unit_addr;
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
@@ -2085,11 +2072,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->path_thrhld = DASD_ECKD_PATH_THRHLD;
device->path_interval = DASD_ECKD_PATH_INTERVAL;
- if (private->gneq) {
+ if (private->conf.gneq) {
value = 1;
- for (i = 0; i < private->gneq->timeout.value; i++)
+ for (i = 0; i < private->conf.gneq->timeout.value; i++)
value = 10 * value;
- value = value * private->gneq->timeout.number;
+ value = value * private->conf.gneq->timeout.number;
/* do not accept useless values */
if (value != 0 && value <= DASD_EXPIRES_MAX)
device->default_expires = value;
@@ -2121,6 +2108,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err3;
+ dasd_eckd_read_fc_security(device);
dasd_path_create_kobjects(device);
/* Read Feature Codes */
@@ -2195,10 +2183,10 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
return;
dasd_alias_disconnect_device_from_lcu(device);
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
+ private->conf.ned = NULL;
+ private->conf.sneq = NULL;
+ private->conf.vdsneq = NULL;
+ private->conf.gneq = NULL;
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
}
@@ -3750,8 +3738,8 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
* subset.
*/
ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
- ras_data->lss = private->ned->ID;
- ras_data->dev_addr = private->ned->unit_addr;
+ ras_data->lss = private->conf.ned->ID;
+ ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
if (by_extent) {
@@ -4293,8 +4281,8 @@ static int prepare_itcw(struct itcw *itcw,
memset(&pfxdata, 0, sizeof(pfxdata));
pfxdata.format = 1; /* PFX with LRE */
- pfxdata.base_address = basepriv->ned->unit_addr;
- pfxdata.base_lss = basepriv->ned->ID;
+ pfxdata.base_address = basepriv->conf.ned->unit_addr;
+ pfxdata.base_lss = basepriv->conf.ned->ID;
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -4963,9 +4951,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
- info->confdata_size = min((unsigned long)private->conf_len,
- sizeof(info->configuration_data));
- memcpy(info->configuration_data, private->conf_data,
+ info->confdata_size = min_t(unsigned long, private->conf.len,
+ sizeof(info->configuration_data));
+ memcpy(info->configuration_data, private->conf.data,
info->confdata_size);
return 0;
}
@@ -5808,6 +5796,8 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
if (rc)
goto out_err;
+ dasd_eckd_read_fc_security(device);
+
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
@@ -5820,15 +5810,7 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
- if (strlen(uid.vduit) > 0)
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
- uid.ssid, uid.base_unit_addr, uid.vduit);
- else
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x", uid.vendor, uid.serial,
- uid.ssid, uid.base_unit_addr);
-
+ dasd_eckd_get_uid_string(&private->conf, print_uid);
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
@@ -5966,8 +5948,8 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
- prssdp->lss = private->ned->ID;
- prssdp->volume = private->ned->unit_addr;
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 65e4630ad2ae..a91b265441cc 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -658,16 +658,19 @@ struct dasd_conf_data {
struct dasd_gneq gneq;
} __packed;
-struct dasd_eckd_private {
- struct dasd_eckd_characteristics rdc_data;
- u8 *conf_data;
- int conf_len;
-
+struct dasd_conf {
+ u8 *data;
+ int len;
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
+};
+
+struct dasd_eckd_private {
+ struct dasd_eckd_characteristics rdc_data;
+ struct dasd_conf conf;
struct eckd_count count_area[5];
int init_cqr_status;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index ba4fa372d02d..c07e6e713518 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -24,7 +24,7 @@
#include "dasd_int.h"
struct dasd_ccw_req *
-dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+dasd_alloc_erp_request(unsigned int magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
@@ -33,8 +33,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
int size;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
- (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+ BUG_ON(datasize > PAGE_SIZE ||
+ (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
@@ -62,7 +62,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
- strncpy((char *) &cqr->magic, magic, 4);
+ cqr->magic = magic;
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index fa966e0db6ca..3a6f3af240fa 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -14,6 +14,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
+#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 155428bfed8a..8b458010f88a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -887,7 +887,7 @@ void dasd_proc_exit(void);
/* externals in dasd_erp.c */
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
-struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+struct dasd_ccw_req *dasd_alloc_erp_request(unsigned int, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
@@ -1305,6 +1305,15 @@ static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
dasd_path_preferred(device, chp);
}
+static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_fcsec(device, chp);
+}
+
/*
* set functions for path masks
* the existing path mask will be replaced by the given path mask
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 468cbeb539ff..95349f95758c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -650,8 +650,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
/**
* dasd_biodasdinfo() - fill out the dasd information structure
- * @disk [in]: pointer to gendisk structure that references a DASD
- * @info [out]: pointer to the dasd_information2_t structure
+ * @disk: [in] pointer to gendisk structure that references a DASD
+ * @info: [out] pointer to the dasd_information2_t structure
*
* Provide access to DASD specific information.
* The gendisk structure is checked if it belongs to the DASD driver by
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5be3d1c39a78..59e513d34b0f 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -30,7 +30,7 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static blk_qc_t dcssblk_submit_bio(struct bio *bio);
+static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
@@ -854,7 +854,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
up_write(&dcssblk_devices_sem);
}
-static blk_qc_t
+static void
dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
@@ -907,10 +907,9 @@ dcssblk_submit_bio(struct bio *bio)
bytes_done += bvec.bv_len;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
fail:
bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static long
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 46815e65f7a4..5def83c88f13 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -517,8 +517,10 @@ struct asd_ms_conn_map {
u8 num_nodes;
u8 usage_model_id;
u32 _resvd;
- struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[];
+ union {
+ DECLARE_FLEX_ARRAY(struct asd_ms_conn_desc, conn_desc);
+ DECLARE_FLEX_ARRAY(struct asd_ms_node_desc, node_desc);
+ };
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
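A union may not contain two bare flexible array members, so the aic94xx hunk wraps each one in DECLARE_FLEX_ARRAY(), which roughly expands to an unnamed struct holding an empty member followed by the flexible array; both descriptor tables then overlay at the same offset right after the fixed header. A compilable GNU C sketch of that layout with made-up descriptor types:

    /* build with gcc; relies on the GNU empty-struct idiom */
    #include <stddef.h>
    #include <stdio.h>

    struct conn_desc { unsigned char a, b; };
    struct node_desc { unsigned int x; };

    struct conn_map {
        unsigned char num_nodes;
        unsigned char usage_model_id;
        union {
            struct { struct { } __empty_conn; struct conn_desc conn_desc[]; };
            struct { struct { } __empty_node; struct node_desc node_desc[]; };
        };
    } __attribute__((packed));

    int main(void)
    {
        /* both tables start right after the fixed header */
        printf("conn_desc offset = %zu\n", offsetof(struct conn_map, conn_desc));
        printf("node_desc offset = %zu\n", offsetof(struct conn_map, node_desc));
        return 0;
    }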
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 3ab669dc806f..27884f3106ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017 Hisilicon Limited.
*/
+#include <linux/sched/clock.h>
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3f6f14f0cafb..24b72ee4246f 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
- shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+ /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+ shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
error = scsi_init_sense_cache(shost);
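shost->cmd_per_lun is a short while shost->can_queue is an int, so clamping with min_t(short, ...) first truncates can_queue to 16 bits and can yield a zero or negative limit whenever can_queue exceeds SHRT_MAX; comparing as int keeps the arithmetic sane. A tiny demonstration of the truncation (min_t is open-coded here, without the kernel's double-evaluation protection):

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        short cmd_per_lun = 64;
        int can_queue = 65536;          /* > SHRT_MAX */

        short bad  = min_t(short, cmd_per_lun, can_queue);
        short good = min_t(int, cmd_per_lun, can_queue);

        printf("min_t(short, ...) = %d\n", bad);    /* (short)65536 == 0 */
        printf("min_t(int, ...)   = %d\n", good);   /* 64, as intended */
        return 0;
    }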
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1f1586ad48fe..01f79991bf4a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->sent);
+ atomic_set(&evt->active, 1);
mb();
@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
be64_to_cpu(crq_as_u64[1]));
if (rc) {
+ atomic_set(&evt->active, 0);
list_del(&evt->queue_list);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
del_timer(&evt->timer);
@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->done(evt);
} else {
- atomic_set(&evt->active, 1);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 50df7dd9cb91..ea8e01f49cba 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1055,8 +1055,9 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
return SCSI_MLQUEUE_HOST_BUSY;
/* Set up the actual SRP IU */
+ BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN);
+ memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp));
srp_cmd = &evt_struct->iu.srp.cmd;
- memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
int_to_scsilun(lun, &srp_cmd->lun);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index befeb7c34290..337e6ed24821 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -22,6 +22,7 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+#include <linux/hashtable.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 2197988333fe..3cae8803383b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = -1;
shost->unique_id = mrioc->id;
- shost->max_channel = 1;
+ shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
if (prot_mask >= 0)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d383d4a03436..ad1b6c2b37a7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5065,9 +5065,12 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
+
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
cpu_to_be32(scsi_prot_ref_tag(scmd));
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4b5d28d89d69..655cf5de604b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -431,7 +431,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport);
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1c5da2dbd6f9..253055cf9daf 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -642,6 +644,18 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
}
+static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+ struct blk_mq_queue_map *map)
+{
+ struct scsi_qla_host *vha = lport->private;
+ int rc;
+
+ rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ if (rc)
+ ql_log(ql_log_warn, vha, 0x21de,
+ "pci map queue failed 0x%x", rc);
+}
+
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@@ -676,6 +690,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .map_queues = qla_nvme_map_queues,
.max_hw_queues = 8,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d2e40aaba734..836fedcea241 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ql_dbg_pci(ql_dbg_init, ha->pdev,
0xe0ee, "%s: failed alloc dsd\n",
__func__);
- return 1;
+ return -ENOMEM;
}
ha->dif_bundle_kallocs++;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b3478ed9b12e..7d8242c120fc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return 0;
+ goto out_unmap_unlock;
}
/* Does F/W have an IOCBs for this request */
@@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
-
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
@@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return 0;
}
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->cmd_in_wq);
- if (cmd->sg_mapped)
- qlt_unmap_sg(cmd->vha, cmd);
-
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 031569c496e5..69a590546bf9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -366,13 +366,13 @@ struct qla4_work_evt {
struct {
enum iscsi_host_event_code code;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} aen;
struct {
uint32_t status;
uint32_t pid;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} ping;
} u;
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b241f9e3885c..291ecc33b1fe 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
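put_device() in scsi_device_put() may drop the last reference and free the scsi_device, after which sdev->host->hostt->module must not be dereferenced; the fix therefore reads the module pointer into a local before dropping the reference. A minimal user-space sketch of the same cache-then-put rule, with an invented refcounted object:

    #include <stdio.h>
    #include <stdlib.h>

    struct owner { const char *name; };

    struct device {
        int refcnt;
        struct owner *owner;
    };

    static void put_ref(struct device *d)
    {
        if (--d->refcnt == 0)
            free(d);                    /* d must not be touched after this */
    }

    static void release_device(struct device *d)
    {
        struct owner *owner = d->owner; /* cache it before the final put */

        put_ref(d);
        printf("dropping module %s\n", owner->name);    /* no use of d here */
    }

    int main(void)
    {
        static struct owner mod = { .name = "demo" };
        struct device *d = malloc(sizeof(*d));

        d->refcnt = 1;
        d->owner = &mod;
        release_device(d);
        return 0;
    }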
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 81c3853a2a80..081b84bb7985 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -25,8 +25,8 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
return -EOPNOTSUPP;
}
- rq = blk_get_request(q, hdr->dout_xfer_len ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
rq->timeout = timeout;
@@ -95,7 +95,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
out_free_cmd:
scsi_req_free_cmd(scsi_req(rq));
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 66f507469a31..40b473eea357 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
- bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
+ bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_host_max_queue)
sd_dp->hc_idx = get_tag(cmnd);
- if (hipri)
+ if (polled)
ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */
@@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending)))
sd_dp->aborted = true;
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
if (kt_from_boot < sd_dp->cmpl_ts)
continue;
- } else /* ignoring non REQ_HIPRI requests */
+ } else /* ignoring non REQ_POLLED requests */
continue;
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (likely(devip))
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b6c86cce57bf..36870b41c888 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1979,7 +1979,7 @@ maybe_retry:
static void eh_lock_door_done(struct request *req, blk_status_t status)
{
- blk_put_request(req);
+ blk_mq_free_request(req);
}
/**
@@ -1998,7 +1998,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
struct request *req;
struct scsi_request *rq;
- req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
+ req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
rq = scsi_req(req);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 6ff2207bd45a..34412eac4566 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -438,7 +438,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
at_head = 1;
ret = -ENOMEM;
- rq = blk_get_request(sdev->request_queue, writing ?
+ rq = scsi_alloc_request(sdev->request_queue, writing ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -490,7 +490,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
out_free_cdb:
scsi_req_free_cmd(req);
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
@@ -561,7 +561,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
}
- rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto error_free_buffer;
@@ -634,7 +634,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
}
error:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
error_free_buffer:
kfree(buffer);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 572673873ddf..9c2b99e12ce3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
@@ -215,7 +216,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret;
- req = blk_get_request(sdev->request_queue,
+ req = scsi_alloc_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
@@ -259,7 +260,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
ret = rq->result;
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -1078,9 +1079,6 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
* This function initializes the members of struct scsi_cmnd that must be
* initialized before request processing starts and that won't be
* reinitialized if a SCSI command is requeued.
- *
- * Called from inside blk_get_request() for pass-through requests and from
- * inside scsi_init_command() for filesystem requests.
*/
static void scsi_initialize_rq(struct request *rq)
{
@@ -1097,6 +1095,18 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+struct request *scsi_alloc_request(struct request_queue *q,
+ unsigned int op, blk_mq_req_flags_t flags)
+{
+ struct request *rq;
+
+ rq = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(rq))
+ scsi_initialize_rq(rq);
+ return rq;
+}
+EXPORT_SYMBOL_GPL(scsi_alloc_request);
+
/*
* Only called when the request isn't completed by SCSI, and not freed by
* SCSI
@@ -1783,7 +1793,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
}
-static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct Scsi_Host *shost = hctx->driver_data;
@@ -1863,7 +1873,6 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
@@ -1893,7 +1902,6 @@ static const struct blk_mq_ops scsi_mq_ops = {
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
@@ -1959,6 +1967,14 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
return sdev;
}
+/*
+ * pktcdvd should have been integrated into the SCSI layers, but for historical
+ * reasons like the old IDE driver it isn't. This export allows it to safely
+ * probe if a given device is a SCSI one and only attach to that.
+ */
+#ifdef CONFIG_CDROM_PKTCDVD_MODULE
+EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+#endif
/**
* scsi_block_requests - Utility function used by low-level drivers to prevent
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fe22191522a3..2808c0cb5711 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -280,7 +280,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
WARN_ON_ONCE(!blk_get_queue(q));
depth = sdev->host->cmd_per_lun ?: 1;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 86793259e541..a35841b34bfd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
+ struct module *mod;
sdev = container_of(work, struct scsi_device, ew.work);
+ mod = sdev->host->hostt->module;
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
@@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+ /* Clear the module pointer if the module is already being unloaded */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 922e4c7bd88e..78343d3f9385 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- err = transport->set_param(conn, ev->u.set_param.param,
- data, ev->u.set_param.len);
if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) {
err = transport->set_param(conn, ev->u.set_param.param,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 523bf2fdc253..252e43d7c73f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -48,6 +48,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
@@ -1756,6 +1757,44 @@ static void sd_rescan(struct device *dev)
sd_revalidate_disk(sdkp->disk);
}
+static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct scsi_device *sdev = scsi_disk(disk)->device;
+ const struct scsi_vpd *vpd;
+ const unsigned char *d;
+ int ret = -ENXIO, len;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg83);
+ if (!vpd)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
+ /* we only care about designators with LU association */
+ if (((d[1] >> 4) & 0x3) != 0x00)
+ continue;
+ if ((d[1] & 0xf) != type)
+ continue;
+
+ /*
+ * Only exit early if a 16-byte descriptor was found. Otherwise
+ * keep looking as one with more entropy might still show up.
+ */
+ len = d[3];
+ if (len != 8 && len != 12 && len != 16)
+ continue;
+ ret = len;
+ memcpy(id, d + 4, len);
+ if (len == 16)
+ break;
+ }
+out_unlock:
+ rcu_read_unlock();
+ return ret;
+}
+
static char sd_pr_type(enum pr_type type)
{
switch (type) {
@@ -1860,6 +1899,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
+ .get_unique_id = sd_get_unique_id,
.pr_ops = &sd_pr_ops,
};
@@ -3087,6 +3127,86 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->security = 1;
}
+static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
+{
+ return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
+}
+
+/**
+ * sd_read_cpr - Query concurrent positioning ranges
+ * @sdkp: disk to query
+ */
+static void sd_read_cpr(struct scsi_disk *sdkp)
+{
+ struct blk_independent_access_ranges *iars = NULL;
+ unsigned char *buffer = NULL;
+ unsigned int nr_cpr = 0;
+ int i, vpd_len, buf_len = SD_BUF_SIZE;
+ u8 *desc;
+
+ /*
+ * We need to have the capacity set first for the block layer to be
+ * able to check the ranges.
+ */
+ if (sdkp->first_scan)
+ return;
+
+ if (!sdkp->capacity)
+ goto out;
+
+ /*
+ * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
+ * leading to a maximum page size of 64 + 256*32 bytes.
+ */
+ buf_len = 64 + 256*32;
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
+ goto out;
+
+ /* We must have at least a 64B header and one 32B range descriptor */
+ vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+ if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Ranges VPD page\n");
+ goto out;
+ }
+
+ nr_cpr = (vpd_len - 64) / 32;
+ if (nr_cpr == 1) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
+ if (!iars) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ desc = &buffer[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ if (desc[0] != i) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Range number\n");
+ nr_cpr = 0;
+ break;
+ }
+
+ iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
+ iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
+ }
+
+out:
+ disk_set_independent_access_ranges(sdkp->disk, iars);
+ if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u concurrent positioning ranges\n", nr_cpr);
+ sdkp->nr_actuators = nr_cpr;
+ }
+
+ kfree(buffer);
+}
+
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
@@ -3202,6 +3322,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
/*
@@ -3683,7 +3804,12 @@ static int sd_resume(struct device *dev)
static int sd_resume_runtime(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_device *sdp;
+
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
+ sdp = sdkp->device;
if (sdp->ignore_media_change) {
/* clear the device's sense data */
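sd_read_cpr() above expects a 64-byte page header followed by 32-byte range descriptors, so a page describing n ranges occupies 64 + n*32 bytes (at most 64 + 256*32 = 8256), and a valid length must be at least 96 and a multiple of 32. The short sketch below only re-plays that length and offset arithmetic on a fabricated value; it does not parse real VPD data:

    #include <stdio.h>

    int main(void)
    {
        /* pretend the device reported two ranges: 64-byte header + 2 * 32 */
        unsigned int vpd_len = 64 + 2 * 32;

        if (vpd_len < 64 + 32 || (vpd_len & 31)) {
            puts("invalid Concurrent Positioning Ranges page");
            return 1;
        }

        unsigned int nr_cpr = (vpd_len - 64) / 32;

        printf("nr_cpr = %u\n", nr_cpr);
        for (unsigned int i = 0; i < nr_cpr; i++)
            printf("range %u starts at buffer offset %u\n", i, 64 + i * 32);
        return 0;
    }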
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index b59136c4125b..2e5932bde43d 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -106,6 +106,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
u8 zeroing_mode;
+ u8 nr_actuators; /* Number of actuators */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4cadb26070a8..349950616adc 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -6,7 +6,7 @@
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*/
-#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8f05248920e8..141099ab9092 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
+#include <linux/major.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
@@ -814,7 +815,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
+ blk_mq_free_request(srp->rq);
srp->rq = NULL;
}
@@ -1389,7 +1390,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
*/
srp->rq = NULL;
scsi_req_free_cmd(scsi_req(rq));
- blk_put_request(rq);
+ blk_mq_free_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
@@ -1717,13 +1718,13 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
- * preallocated requests are already in use, then blk_get_request()
+ * preallocated requests are already in use, then scsi_alloc_request()
* will sleep until an active command completes, freeing up a request.
* Although waiting in an asynchronous interface is less than ideal, we
* do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
* not expect an EWOULDBLOCK from this condition.
*/
- rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
kfree(long_cmdp);
@@ -1829,7 +1830,7 @@ sg_finish_rem_req(Sg_request *srp)
if (srp->rq) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
+ blk_mq_free_request(srp->rq);
}
if (srp->res_used)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8b17b35283aa..3009b986d1d7 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -44,6 +44,7 @@
#include <linux/cdrom.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/mutex.h>
@@ -966,7 +967,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
struct bio *bio;
int ret;
- rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = scsi_req(rq);
@@ -1002,7 +1003,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ae8636d3780b..c2d5608f6b1a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -32,6 +32,7 @@ static const char *verstr = "20160209";
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mtio.h>
+#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
@@ -529,7 +530,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
complete(SRpnt->waiting);
blk_rq_unmap_user(tmp);
- blk_put_request(req);
+ blk_mq_free_request(req);
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -542,7 +543,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int err = 0;
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue,
+ req = scsi_alloc_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
@@ -556,7 +557,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
GFP_KERNEL);
if (err) {
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ebbbc1299c62..9eb1b88a29dd 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
foreach_vmbus_pkt(desc, channel) {
struct vstor_packet *packet = hv_pkt_data(desc);
struct storvsc_cmd_request *request = NULL;
+ u32 pktlen = hv_pkt_datalen(desc);
u64 rqst_id = desc->trans_id;
+ u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
+ stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
- if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta) {
- dev_err(&device->device, "Invalid packet len\n");
+ if (pktlen < minlen) {
+ dev_err(&device->device,
+ "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+ rqst_id, pktlen, minlen);
continue;
}
@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
if (rqst_id == 0) {
/*
* storvsc_on_receive() looks at the vstor_packet in the message
- * from the ring buffer. If the operation in the vstor_packet is
- * COMPLETE_IO, then we call storvsc_on_io_completion(), and
- * dereference the guest memory address. Make sure we don't call
- * storvsc_on_io_completion() with a guest memory address that is
- * zero if Hyper-V were to construct and send such a bogus packet.
+ * from the ring buffer.
+ *
+ * - If the operation in the vstor_packet is COMPLETE_IO, then
+ * we call storvsc_on_io_completion(), and dereference the
+ * guest memory address. Make sure we don't call
+ * storvsc_on_io_completion() with a guest memory address
+ * that is zero if Hyper-V were to construct and send such
+ * a bogus packet.
+ *
+ * - If the operation in the vstor_packet is FCHBA_DATA, then
+ * we call cache_wwn(), and access the data payload area of
+ * the packet (wwn_packet); however, there is no guarantee
+ * that the packet is big enough to contain such area.
+ * Future-proof the code by rejecting such a bogus packet.
*/
- if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+ packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
dev_err(&device->device, "Invalid packet with ID of 0\n");
continue;
}
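The storvsc hunk above makes the minimum acceptable packet length depend on the transaction id: unsolicited packets (trans_id 0) only need to carry an operation code, while completions must carry a full (possibly size-reduced) vstor_packet. Below is a minimal standalone sketch of that selection logic; the struct and enum here are simplified stand-ins, not the real Hyper-V definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the real Hyper-V storvsc definitions. */
	enum vstor_packet_operation { VSTOR_OPERATION_COMPLETE_IO = 1 };
	struct vstor_packet { uint32_t operation; uint8_t body[56]; };

	/* Returns 1 if the packet is long enough to be parsed, 0 otherwise. */
	static int packet_len_ok(uint64_t rqst_id, uint32_t pktlen,
				 uint32_t vmscsi_size_delta)
	{
		/* Unsolicited packets (id 0) only need the operation code;
		 * completions need a full, size-delta-adjusted vstor_packet. */
		uint32_t minlen = rqst_id ?
			sizeof(struct vstor_packet) - vmscsi_size_delta :
			sizeof(enum vstor_packet_operation);
		return pktlen >= minlen;
	}

	int main(void)
	{
		printf("id=0 len=4  -> %d\n", packet_len_ok(0, 4, 0));  /* accepted */
		printf("id=7 len=4  -> %d\n", packet_len_ok(7, 4, 0));  /* rejected */
		printf("id=7 len=64 -> %d\n", packet_len_ok(7, 64, 0)); /* accepted */
		return 0;
	}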
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index a14dd8ce56d4..bb2dd79a1bcd 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -642,9 +642,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
}
/* setting for three timeout values for traffic class #0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
return 0;
out:
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index d70cdcd35e43..67402baf6fae 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -48,11 +48,12 @@ out:
return err;
}
-static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm);
+ struct ufs_hba *hba =
+ container_of(profile, struct ufs_hba, crypto_profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
@@ -105,11 +106,12 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
return ufshcd_program_key(hba, &cfg, slot);
}
-static int ufshcd_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm);
+ struct ufs_hba *hba =
+ container_of(profile, struct ufs_hba, crypto_profile);
return ufshcd_clear_keyslot(hba, slot);
}
@@ -120,11 +122,11 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
return false;
/* Reset might clear all keys, so reprogram all the keys. */
- blk_ksm_reprogram_all_keys(&hba->ksm);
+ blk_crypto_reprogram_all_keys(&hba->crypto_profile);
return true;
}
-static const struct blk_ksm_ll_ops ufshcd_ksm_ops = {
+static const struct blk_crypto_ll_ops ufshcd_crypto_ops = {
.keyslot_program = ufshcd_crypto_keyslot_program,
.keyslot_evict = ufshcd_crypto_keyslot_evict,
};
@@ -179,15 +181,16 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
}
/* The actual number of configurations supported is (CFGC+1) */
- err = devm_blk_ksm_init(hba->dev, &hba->ksm,
- hba->crypto_capabilities.config_count + 1);
+ err = devm_blk_crypto_profile_init(
+ hba->dev, &hba->crypto_profile,
+ hba->crypto_capabilities.config_count + 1);
if (err)
goto out;
- hba->ksm.ksm_ll_ops = ufshcd_ksm_ops;
+ hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
- hba->ksm.max_dun_bytes_supported = 8;
- hba->ksm.dev = hba->dev;
+ hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.dev = hba->dev;
/*
* Cache all the UFS crypto capabilities and advertise the supported
@@ -202,7 +205,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
blk_mode_num = ufshcd_find_blk_crypto_mode(
hba->crypto_cap_array[cap_idx]);
if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
- hba->ksm.crypto_modes_supported[blk_mode_num] |=
+ hba->crypto_profile.modes_supported[blk_mode_num] |=
hba->crypto_cap_array[cap_idx].sdus_mask * 512;
}
@@ -230,9 +233,8 @@ void ufshcd_init_crypto(struct ufs_hba *hba)
ufshcd_clear_keyslot(hba, slot);
}
-void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q)
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
{
if (hba->caps & UFSHCD_CAP_CRYPTO)
- blk_ksm_register(&hba->ksm, q);
+ blk_crypto_register(&hba->crypto_profile, q);
}
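The ufshcd-crypto.c hunks above are a rename of the keyslot-manager API to the blk_crypto_profile API. A minimal sketch of the resulting wiring is shown below; it is a kernel-style fragment, not a buildable module, and it assumes a hypothetical host structure "my_host" with an embedded profile and hypothetical keyslot callbacks. Only calls and fields that appear in the diff (devm_blk_crypto_profile_init(), blk_crypto_register(), ll_ops, max_dun_bytes_supported, modes_supported) are used.

	#include <linux/blk-crypto-profile.h>

	static const struct blk_crypto_ll_ops my_crypto_ops = {
		.keyslot_program = my_keyslot_program,	/* hypothetical callbacks */
		.keyslot_evict   = my_keyslot_evict,
	};

	static int my_init_crypto(struct my_host *host, unsigned int nr_slots)
	{
		int err;

		/* Managed allocation of nr_slots keyslots tied to host->dev. */
		err = devm_blk_crypto_profile_init(host->dev,
						   &host->crypto_profile, nr_slots);
		if (err)
			return err;

		host->crypto_profile.ll_ops = my_crypto_ops;
		host->crypto_profile.max_dun_bytes_supported = 8;
		host->crypto_profile.dev = host->dev;
		/* Example: advertise AES-256-XTS for 512-byte data units. */
		host->crypto_profile.modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 512;
		return 0;
	}

	/* Later, once a request queue exists:
	 *	blk_crypto_register(&host->crypto_profile, q);
	 */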
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
index 78a58e788dff..e18c01276873 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/scsi/ufs/ufshcd-crypto.h
@@ -18,7 +18,7 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
return;
}
- lrbp->crypto_key_slot = blk_ksm_get_slot_idx(rq->crypt_keyslot);
+ lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
}
@@ -40,8 +40,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
void ufshcd_init_crypto(struct ufs_hba *hba);
-void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q);
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q);
#else /* CONFIG_SCSI_UFS_CRYPTO */
@@ -64,8 +63,8 @@ static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
static inline void ufshcd_init_crypto(struct ufs_hba *hba) { }
-static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q) { }
+static inline void ufshcd_crypto_register(struct ufs_hba *hba,
+ struct request_queue *q) { }
#endif /* CONFIG_SCSI_UFS_CRYPTO */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 149c1aa09103..51424557810d 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
- /*
- * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
- * address registers must be restored because the restore kernel can
- * have used different addresses.
- */
- ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_H);
- ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_H);
-
if (ufshcd_is_link_hibern8(hba)) {
int ret = ufshcd_uic_hibern8_exit(hba);
@@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ /* Force a full reset and restore */
+ ufshcd_set_link_off(hba);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_suspend,
+ .thaw = ufshcd_system_resume,
+ .poweroff = ufshcd_system_suspend,
+ .restore = ufshcd_pci_restore,
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
#endif
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 95be7ecdfe10..db1bc8655d46 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2737,12 +2737,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->req_abort_skip = false;
- err = ufshpb_prep(hba, lrbp);
- if (err == -EAGAIN) {
- lrbp->cmd = NULL;
- ufshcd_release(hba);
- goto out;
- }
+ ufshpb_prep(hba, lrbp);
ufshcd_comp_scsi_upiu(hba, lrbp);
@@ -2925,7 +2920,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
@@ -2952,7 +2947,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
@@ -4986,7 +4981,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
- ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
+ ufshcd_crypto_register(hba, q);
return 0;
}
@@ -6517,9 +6512,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
int task_tag, err;
/*
- * blk_get_request() is used here only to get a free tag.
+ * blk_mq_alloc_request() is used here only to get a free tag.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -6575,7 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
@@ -6660,7 +6655,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
@@ -6741,7 +6736,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
@@ -7912,7 +7907,7 @@ static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
if (error != BLK_STS_OK)
pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
kfree(rq->end_io_data);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static int
@@ -7932,7 +7927,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
if (!buffer)
return -ENOMEM;
- req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+ req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
/*flags=*/BLK_MQ_REQ_PM);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -7957,7 +7952,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
return 0;
out_put:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_free:
kfree(buffer);
return ret;
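All of the ufshcd.c hunks above follow the same mechanical conversion from blk_get_request()/blk_put_request() to blk_mq_alloc_request()/blk_mq_free_request(). A short sketch of the pairing, using only the calls visible in the diff (error handling mirrors the hunks; "q" stands for the device request queue):

	struct request *req;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, /*flags=*/0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... build and execute the passthrough command ... */

	blk_mq_free_request(req);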
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 41f6e06f9185..62bdc412d38a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -32,7 +32,7 @@
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -766,7 +766,7 @@ struct ufs_hba_monitor {
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
* @crypto_cfg_register: Start of the crypto cfg array
- * @ksm: the keyslot manager tied to this hba
+ * @crypto_profile: the crypto profile of this hba (if applicable)
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -911,7 +911,7 @@ struct ufs_hba {
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
u32 crypto_cfg_register;
- struct blk_keyslot_manager ksm;
+ struct blk_crypto_profile crypto_profile;
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 589af5f6b940..182bcbf60f8c 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
return transfer_len <= hpb->pre_req_max_tr_len;
}
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
- return len > hpb->pre_req_min_tr_len &&
- len <= hpb->pre_req_max_tr_len;
-}
-
static bool ufshpb_is_general_lun(int lun)
{
return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -334,7 +324,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- __be64 ppn, u8 transfer_len, int read_id)
+ __be64 ppn, u8 transfer_len)
{
unsigned char *cdb = lrbp->cmd->cmnd;
__be64 ppn_tmp = ppn;
@@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/* ppn value is stored as big-endian in the host memory */
memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
cdb[14] = transfer_len;
- cdb[15] = read_id;
+ cdb[15] = 0;
lrbp->cmd->cmd_len = UFS_CDB_SIZE;
}
-static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
- unsigned long lpn, unsigned int len,
- int read_id)
-{
- cdb[0] = UFSHPB_WRITE_BUFFER;
- cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
-
- put_unaligned_be32(lpn, &cdb[2]);
- cdb[6] = read_id;
- put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
-
- cdb[9] = 0x00; /* Control = 0x00 */
-}
-
-static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req;
-
- if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev,
- "pre_req throttle. inflight %d throttle %d",
- hpb->num_inflight_pre_req, hpb->throttle_pre_req);
- return NULL;
- }
-
- pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
- struct ufshpb_req, list_req);
- if (!pre_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
- return NULL;
- }
-
- list_del_init(&pre_req->list_req);
- hpb->num_inflight_pre_req++;
-
- return pre_req;
-}
-
-static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *pre_req)
-{
- pre_req->req = NULL;
- bio_reset(pre_req->bio);
- list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
- hpb->num_inflight_pre_req--;
-}
-
-static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
-{
- struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
- struct ufshpb_lu *hpb = pre_req->hpb;
- unsigned long flags;
-
- if (error) {
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- struct scsi_sense_hdr sshdr;
-
- dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
- scsi_command_normalize_sense(cmd, &sshdr);
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "code %x sense_key %x asc %x ascq %x",
- sshdr.response_code,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "byte4 %x byte5 %x byte6 %x additional_len %x",
- sshdr.byte4, sshdr.byte5,
- sshdr.byte6, sshdr.additional_length);
- }
-
- blk_mq_free_request(req);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_put_pre_req(pre_req->hpb, pre_req);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
-{
- struct ufshpb_lu *hpb = pre_req->hpb;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- __be64 *addr;
- int offset = 0;
- int copied;
- unsigned long lpn = pre_req->wb.lpn;
- int rgn_idx, srgn_idx, srgn_offset;
- unsigned long flags;
-
- addr = page_address(page);
- ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
-next_offset:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (!ufshpb_is_valid_srgn(rgn, srgn))
- goto mctx_error;
-
- if (!srgn->mctx)
- goto mctx_error;
-
- copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
- pre_req->wb.len - offset,
- &addr[offset]);
-
- if (copied < 0)
- goto mctx_error;
-
- offset += copied;
- srgn_offset += copied;
-
- if (srgn_offset == hpb->entries_per_srgn) {
- srgn_offset = 0;
-
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
- }
-
- if (offset < pre_req->wb.len)
- goto next_offset;
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return 0;
-mctx_error:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return -ENOMEM;
-}
-
-static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
- struct request_queue *q,
- struct ufshpb_req *pre_req)
-{
- struct page *page = pre_req->wb.m_page;
- struct bio *bio = pre_req->bio;
- int entries_bytes, ret;
-
- if (!page)
- return -ENOMEM;
-
- if (ufshpb_prep_entry(pre_req, page))
- return -ENOMEM;
-
- entries_bytes = pre_req->wb.len * sizeof(__be64);
-
- ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
- if (ret != entries_bytes) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "bio_add_pc_page fail: %d", ret);
- return -ENOMEM;
- }
- return 0;
-}
-
-static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
-{
- if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
- hpb->cur_read_id = 1;
- return hpb->cur_read_id;
-}
-
-static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
- struct ufshpb_req *pre_req, int read_id)
-{
- struct scsi_device *sdev = cmd->device;
- struct request_queue *q = sdev->request_queue;
- struct request *req;
- struct scsi_request *rq;
- struct bio *bio = pre_req->bio;
-
- pre_req->hpb = hpb;
- pre_req->wb.lpn = sectors_to_logical(cmd->device,
- blk_rq_pos(scsi_cmd_to_rq(cmd)));
- pre_req->wb.len = sectors_to_logical(cmd->device,
- blk_rq_sectors(scsi_cmd_to_rq(cmd)));
- if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
- return -ENOMEM;
-
- req = pre_req->req;
-
- /* 1. request setup */
- blk_rq_append_bio(req, bio);
- req->rq_disk = NULL;
- req->end_io_data = (void *)pre_req;
- req->end_io = ufshpb_pre_req_compl_fn;
-
- /* 2. scsi_request setup */
- rq = scsi_req(req);
- rq->retries = 1;
-
- ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
- read_id);
- rq->cmd_len = scsi_command_size(rq->cmd);
-
- if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
- return -EAGAIN;
-
- hpb->stats.pre_req_cnt++;
-
- return 0;
-}
-
-static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
- int *read_id)
-{
- struct ufshpb_req *pre_req;
- struct request *req = NULL;
- unsigned long flags;
- int _read_id;
- int ret = 0;
-
- req = blk_get_request(cmd->device->request_queue,
- REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(req))
- return -EAGAIN;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- pre_req = ufshpb_get_pre_req(hpb);
- if (!pre_req) {
- ret = -EAGAIN;
- goto unlock_out;
- }
- _read_id = ufshpb_get_read_id(hpb);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- pre_req->req = req;
-
- ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
- if (ret)
- goto free_pre_req;
-
- *read_id = _read_id;
-
- return ret;
-free_pre_req:
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_put_pre_req(hpb, pre_req);
-unlock_out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- blk_put_request(req);
- return ret;
-}
-
/*
* This function will set up HPB read command using host-side L2P map data.
*/
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
__be64 ppn;
unsigned long flags;
int transfer_len, rgn_idx, srgn_idx, srgn_offset;
- int read_id = 0;
int err = 0;
hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "get ppn failed. err %d\n", err);
return err;
}
- if (!ufshpb_is_legacy(hba) &&
- ufshpb_is_required_wb(hpb, transfer_len)) {
- err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
- if (err) {
- unsigned long timeout;
-
- timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
- hpb->params.requeue_timeout_ms);
-
- if (time_before(jiffies, timeout))
- return -EAGAIN;
-
- hpb->stats.miss_cnt++;
- return 0;
- }
- }
- ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+ ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
hpb->stats.hit_cnt++;
return 0;
@@ -721,7 +449,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
return NULL;
retry:
- req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+ req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
BLK_MQ_REQ_NOWAIT);
if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
@@ -745,7 +473,7 @@ free_rq:
static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
- blk_put_request(rq->req);
+ blk_mq_free_request(rq->req);
kmem_cache_free(hpb->map_req_cache, rq);
}
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
u32 entries_per_rgn;
u64 rgn_mem_size, tmp;
- /* for pre_req */
- hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
if (ufshpb_is_legacy(hba))
hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
else
hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
- hpb->cur_read_id = 0;
-
hpb->lu_pinned_start = hpb_lu_info->pinned_start;
hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
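With the pre-request path removed, ufshpb_set_hpb_read_to_upiu() earlier in this ufshpb.c diff is the only place the HPB READ CDB is built: bytes 6..13 carry the big-endian PPN, byte 14 the transfer length, and byte 15 (formerly the read id) is now always zero. A minimal standalone sketch of that layout, assuming a 16-byte CDB and using a placeholder opcode byte:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define UFS_CDB_SIZE 16

	/* Bytes 6..13: big-endian PPN; byte 14: transfer length; byte 15: 0. */
	static void set_hpb_read_cdb(uint8_t *cdb, uint64_t ppn_be, uint8_t transfer_len)
	{
		memcpy(&cdb[6], &ppn_be, sizeof(ppn_be)); /* already big-endian */
		cdb[14] = transfer_len;
		cdb[15] = 0;                              /* read id no longer used */
	}

	int main(void)
	{
		uint8_t cdb[UFS_CDB_SIZE] = { 0xF8 };     /* 0xF8: placeholder opcode */

		set_hpb_read_cdb(cdb, 0x0807060504030201ULL, 8);
		for (int i = 0; i < UFS_CDB_SIZE; i++)
			printf("%02x ", cdb[i]);
		printf("\n");
		return 0;
	}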
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index a79e07398970..f15d8fdbce2e 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -241,8 +241,6 @@ struct ufshpb_lu {
spinlock_t param_lock;
struct list_head lh_pre_req_free;
- int cur_read_id;
- int pre_req_min_tr_len;
int pre_req_max_tr_len;
/* cached L2P map management worker */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 07d0250f17c3..b8455fcbf18b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -22,6 +22,7 @@
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 44fc9ee13fc7..ca40923258af 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -134,7 +134,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
if (!master)
return -ENOMEM;
- master->bus_num = dfl_dev->id;
+ master->bus_num = -1;
hw = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
index f7a7c14e3679..65147aae82a1 100644
--- a/drivers/spi/spi-altera-platform.c
+++ b/drivers/spi/spi-altera-platform.c
@@ -48,7 +48,7 @@ static int altera_spi_probe(struct platform_device *pdev)
return err;
/* setup the master state. */
- master->bus_num = pdev->id;
+ master->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
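Both spi-altera hunks above switch bus_num from the device id to -1, which asks the SPI core to assign a dynamic bus number instead of reusing a possibly colliding id. A hedged kernel-style fragment of the probe-side pattern, with "struct my_spi" as a placeholder for the driver's private data:

	/* Let the SPI core pick a free bus number rather than reuse pdev->id. */
	master = spi_alloc_master(&pdev->dev, sizeof(struct my_spi));
	if (!master)
		return -ENOMEM;

	master->bus_num = -1;		/* -1: core assigns a dynamic bus number */
	master->num_chipselect = 4;	/* example value */

	err = devm_spi_register_master(&pdev->dev, master);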
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index feebda66f56e..e4484ace584e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1716,12 +1716,13 @@ static int verify_controller_parameters(struct pl022 *pl022,
return -EINVAL;
}
} else {
- if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
dev_err(&pl022->adev->dev,
"Microwire half duplex mode requested,"
" but this is only available in the"
" ST version of PL022\n");
- return -EINVAL;
+ return -EINVAL;
+ }
}
}
return 0;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 713292b0c71e..3226c4e1c7c0 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1194,7 +1194,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_slink_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 362ed44b4eff..e046489cd253 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -835,7 +835,6 @@ static int lm3554_probe(struct i2c_client *client)
int err = 0;
struct lm3554 *flash;
unsigned int i;
- int ret;
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
if (!flash)
@@ -844,7 +843,7 @@ static int lm3554_probe(struct i2c_client *client)
flash->pdata = lm3554_platform_data_func(client);
if (IS_ERR(flash->pdata)) {
err = PTR_ERR(flash->pdata);
- goto fail1;
+ goto free_flash;
}
v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
@@ -852,12 +851,12 @@ static int lm3554_probe(struct i2c_client *client)
flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
flash->mode = ATOMISP_FLASH_MODE_OFF;
flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1;
- ret =
+ err =
v4l2_ctrl_handler_init(&flash->ctrl_handler,
ARRAY_SIZE(lm3554_controls));
- if (ret) {
+ if (err) {
dev_err(&client->dev, "error initialize a ctrl_handler.\n");
- goto fail3;
+ goto unregister_subdev;
}
for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
@@ -866,14 +865,15 @@ static int lm3554_probe(struct i2c_client *client)
if (flash->ctrl_handler.error) {
dev_err(&client->dev, "ctrl_handler error.\n");
- goto fail3;
+ err = flash->ctrl_handler.error;
+ goto free_handler;
}
flash->sd.ctrl_handler = &flash->ctrl_handler;
err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
if (err) {
dev_err(&client->dev, "error initialize a media entity.\n");
- goto fail2;
+ goto free_handler;
}
flash->sd.entity.function = MEDIA_ENT_F_FLASH;
@@ -884,16 +884,27 @@ static int lm3554_probe(struct i2c_client *client)
err = lm3554_gpio_init(client);
if (err) {
- dev_err(&client->dev, "gpio request/direction_output fail");
- goto fail3;
+ dev_err(&client->dev, "gpio request/direction_output fail.\n");
+ goto cleanup_media;
+ }
+
+ err = atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+ if (err) {
+ dev_err(&client->dev, "fail to register atomisp i2c module.\n");
+ goto uninit_gpio;
}
- return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
-fail3:
+
+ return 0;
+
+uninit_gpio:
+ lm3554_gpio_uninit(client);
+cleanup_media:
media_entity_cleanup(&flash->sd.entity);
+free_handler:
v4l2_ctrl_handler_free(&flash->ctrl_handler);
-fail2:
+unregister_subdev:
v4l2_device_unregister_subdev(&flash->sd);
-fail1:
+free_flash:
kfree(flash);
return err;
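The reworked lm3554_probe() error path above unwinds resources in strict reverse order of initialization, with one label per acquired resource. A minimal standalone sketch of that goto-unwind pattern, with hypothetical init/uninit steps standing in for the driver's resources:

	#include <stdio.h>

	static int  step_a_init(void)   { puts("A init");   return 0; }
	static void step_a_uninit(void) { puts("A uninit"); }
	static int  step_b_init(void)   { puts("B init");   return -1; /* simulate failure */ }

	static int probe(void)
	{
		int err;

		err = step_a_init();
		if (err)
			return err;

		err = step_b_init();
		if (err)
			goto uninit_a;	/* undo only what succeeded, newest first */

		return 0;

	uninit_a:
		step_a_uninit();
		return err;
	}

	int main(void) { return probe() ? 1 : 0; }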
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 5e3670c4fc29..6c95f57a52e9 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1714,6 +1714,8 @@ static int ov5693_detect(struct i2c_client *client)
}
ret = ov5693_read_reg(client, OV5693_8BIT,
OV5693_SC_CMMN_CHIP_ID_L, &low);
+ if (ret)
+ return ret;
id = ((((u16)high) << 8) | (u16)low);
if (id != OV5693_ID) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index 11b6b1216473..4a9268bac8a9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -22,13 +22,11 @@
#include "atomisp_internal.h"
#include "atomisp-regs.h"
-static struct v4l2_mbus_framefmt *__csi2_get_format(struct
- atomisp_mipi_csi2_device
- * csi2,
- struct v4l2_subdev_state *sd_state,
- enum
- v4l2_subdev_format_whence
- which, unsigned int pad)
+static struct
+v4l2_mbus_framefmt *__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
@@ -43,7 +41,7 @@ static struct v4l2_mbus_framefmt *__csi2_get_format(struct
* @fh : V4L2 subdev file handle
* @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -69,7 +67,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
* @pad: pad num
* @fmt: pointer to v4l2 format structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -104,12 +102,12 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
else
actual_ffmt->code = atomisp_in_fmt_conv[0].code;
- actual_ffmt->width = clamp_t(
- u32, ffmt->width, ATOM_ISP_MIN_WIDTH,
- ATOM_ISP_MAX_WIDTH);
- actual_ffmt->height = clamp_t(
- u32, ffmt->height, ATOM_ISP_MIN_HEIGHT,
- ATOM_ISP_MAX_HEIGHT);
+ actual_ffmt->width = clamp_t(u32, ffmt->width,
+ ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH);
+ actual_ffmt->height = clamp_t(u32, ffmt->height,
+ ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT);
tmp_ffmt = *ffmt = *actual_ffmt;
@@ -132,7 +130,7 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
* @pad: pad num
* @fmt: pointer to v4l2 format structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -147,7 +145,7 @@ static int csi2_set_format(struct v4l2_subdev *sd,
* @enable: Enable/disable stream (1/0)
*
* Return 0 on success or a negative error code otherwise.
-*/
+ */
static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
return 0;
@@ -184,7 +182,7 @@ static const struct v4l2_subdev_ops csi2_ops = {
* @remote : Pointer to remote pad array
* @flags : Link flags
* return -EINVAL or zero on success
-*/
+ */
static int csi2_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
@@ -222,10 +220,10 @@ static const struct media_entity_operations csi2_media_ops = {
};
/*
-* ispcsi2_init_entities - Initialize subdev and media entity.
-* @csi2: Pointer to ispcsi2 structure.
-* return -ENOMEM or zero on success
-*/
+ * ispcsi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to ispcsi2 structure.
+ * return -ENOMEM or zero on success
+ */
static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
int port)
{
@@ -249,9 +247,8 @@ static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
if (ret < 0)
return ret;
- csi2->formats[CSI2_PAD_SINK].code =
- csi2->formats[CSI2_PAD_SOURCE].code =
- atomisp_in_fmt_conv[0].code;
+ csi2->formats[CSI2_PAD_SINK].code = atomisp_in_fmt_conv[0].code;
+ csi2->formats[CSI2_PAD_SOURCE].code = atomisp_in_fmt_conv[0].code;
return 0;
}
@@ -379,21 +376,22 @@ static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
(isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
mipi_freq = ctrl.value;
- clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen,
- mipi_freq, TERMEN_DEFAULT);
- clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle,
- mipi_freq, SETTLE_DEFAULT);
- dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen,
- mipi_freq, TERMEN_DEFAULT);
- dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle,
- mipi_freq, SETTLE_DEFAULT);
+ clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle, mipi_freq,
+ SETTLE_DEFAULT);
+ dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle, mipi_freq,
+ SETTLE_DEFAULT);
+
for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
hrt_address base = csi2_port_base[port] + csi2_lane_base[n];
atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
- n == 0 ? clk_termen : dat_termen);
+ n == 0 ? clk_termen : dat_termen);
atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
- n == 0 ? clk_settle : dat_settle);
+ n == 0 ? clk_settle : dat_settle);
}
}
@@ -405,7 +403,7 @@ void atomisp_csi2_configure(struct atomisp_sub_device *asd)
/*
* atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
-*/
+ */
void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
{
}
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 20e508158871..fb82b9297a2b 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -179,7 +179,7 @@ err_cancel_job:
hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}
-static struct v4l2_m2m_ops vpu_m2m_ops = {
+static const struct v4l2_m2m_ops vpu_m2m_ops = {
.device_run = device_run,
};
@@ -263,9 +263,6 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
- if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED)
- /* No scaling support */
- return -EINVAL;
}
return 0;
}
@@ -450,6 +447,11 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
.id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP,
.name = "Hantro HEVC slice header skip bytes",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -978,7 +980,7 @@ static int hantro_probe(struct platform_device *pdev)
vpu->mdev.dev = vpu->dev;
strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
- sizeof(vpu->mdev.model));
+ sizeof(vpu->mdev.bus_info));
media_device_init(&vpu->mdev);
vpu->mdev.ops = &hantro_m2m_media_ops;
vpu->v4l2_dev.mdev = &vpu->mdev;
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 236ce24ca00c..f49dbfb8a843 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -29,7 +29,7 @@ static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
u32 reg;
/* Decoder control register 0. */
- reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
+ reg = G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
if (sps->profile_idc > 66) {
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
index c1756e3d5391..c623b3b0be18 100644
--- a/drivers/staging/media/hantro/hantro_g1_regs.h
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -68,6 +68,8 @@
#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+/* Setting AXI ID to 0xff to get auto generated ID to avoid possible conflicts */
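(In other words, 0xff requests a hardware auto-generated AXI ID rather than a fixed one.)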
+#define G1_REG_DEC_CTRL0_DEC_AXI_AUTO G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0xff)
#define G1_REG_DEC_CTRL1 0x010
#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6180b23e7d94..851eb67f19f5 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -463,7 +463,8 @@ int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
G1_REG_CONFIG_DEC_MAX_BURST(16);
vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
- reg = G1_REG_DEC_CTRL0_DEC_MODE(10);
+ reg = G1_REG_DEC_CTRL0_DEC_MODE(10) |
+ G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 340efb57fd18..76a921163b9a 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -516,6 +516,56 @@ static void set_buffers(struct hantro_ctx *ctx)
hantro_write_addr(vpu, G2_TILE_BSD, ctx->hevc_dec.tile_bsd.dma);
}
+static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_scaling_matrix *sc = ctrls->scaling;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
+ unsigned int scaling_list_enabled;
+ unsigned int i, j, k;
+
+ scaling_list_enabled = !!(sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
+ hantro_reg_write(vpu, &g2_scaling_list_e, scaling_list_enabled);
+
+ if (!scaling_list_enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_16x16); i++)
+ *p++ = sc->scaling_list_dc_coef_16x16[i];
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_32x32); i++)
+ *p++ = sc->scaling_list_dc_coef_32x32[i];
+
+ /* 128-bit boundary */
+ p += 8;
+
+ /* write scaling lists column by column */
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 4; k++)
+ *p++ = sc->scaling_list_4x4[i][4 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_8x8[i][8 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_16x16[i][8 * k + j];
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_32x32[i][8 * k + j];
+
+ hantro_write_addr(vpu, HEVC_SCALING_LIST, ctx->hevc_dec.scaling_lists.dma);
+}
+
static void hantro_g2_check_idle(struct hantro_dev *vpu)
{
int i;
@@ -556,6 +606,8 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
set_buffers(ctx);
prepare_tile_info_buffer(ctx);
+ prepare_scaling_list_buffer(ctx);
+
hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
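prepare_scaling_list_buffer() above serializes each scaling matrix column by column: the inner loops swap row and column indices relative to the row-major V4L2 control layout. A minimal standalone sketch of that transposition for one 4x4 list:

	#include <stdint.h>
	#include <stdio.h>

	/* Copy a row-major 4x4 scaling list into the output buffer column by
	 * column, mirroring the j/k loop order used in the diff above. */
	static void copy_4x4_column_major(const uint8_t src[16], uint8_t *dst)
	{
		for (int j = 0; j < 4; j++)		/* destination column    */
			for (int k = 0; k < 4; k++)	/* walk down source rows */
				*dst++ = src[4 * k + j];
	}

	int main(void)
	{
		const uint8_t src[16] = {
			 0,  1,  2,  3,
			 4,  5,  6,  7,
			 8,  9, 10, 11,
			12, 13, 14, 15,
		};
		uint8_t dst[16];

		copy_4x4_column_major(src, dst);
		for (int i = 0; i < 16; i++)
			printf("%d ", dst[i]);	/* prints 0 4 8 12 1 5 9 13 ... */
		printf("\n");
		return 0;
	}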
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c
index 5347f5a41c2a..ee03123e7704 100644
--- a/drivers/staging/media/hantro/hantro_hevc.c
+++ b/drivers/staging/media/hantro/hantro_hevc.c
@@ -20,6 +20,8 @@
/* tile border coefficients of filter */
#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel */
+#define SCALING_LIST_SIZE (16 * 64)
+
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
@@ -256,6 +258,11 @@ int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
if (WARN_ON(!ctrls->decode_params))
return -EINVAL;
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
ctrls->sps =
hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SPS);
if (WARN_ON(!ctrls->sps))
@@ -284,6 +291,12 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
hevc_dec->tile_sizes.dma);
hevc_dec->tile_sizes.cpu = NULL;
+ if (hevc_dec->scaling_lists.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->scaling_lists.size,
+ hevc_dec->scaling_lists.cpu,
+ hevc_dec->scaling_lists.dma);
+ hevc_dec->scaling_lists.cpu = NULL;
+
if (hevc_dec->tile_filter.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
hevc_dec->tile_filter.cpu,
@@ -327,6 +340,14 @@ int hantro_hevc_dec_init(struct hantro_ctx *ctx)
hevc_dec->tile_sizes.size = size;
+ hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
+ &hevc_dec->scaling_lists.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->scaling_lists.cpu)
+ return -ENOMEM;
+
+ hevc_dec->scaling_lists.size = SCALING_LIST_SIZE;
+
hantro_hevc_ref_init(ctx);
return 0;
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index df7b5e3a57b9..267a6d33a47b 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -104,6 +104,7 @@ struct hantro_h264_dec_hw_ctx {
/**
* struct hantro_hevc_dec_ctrls
* @decode_params: Decode params
+ * @scaling: Scaling matrix
* @sps: SPS info
* @pps: PPS info
* @hevc_hdr_skip_length: the number of data (in bits) to skip in the
@@ -112,6 +113,7 @@ struct hantro_h264_dec_hw_ctx {
*/
struct hantro_hevc_dec_ctrls {
const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
const struct v4l2_ctrl_hevc_sps *sps;
const struct v4l2_ctrl_hevc_pps *pps;
u32 hevc_hdr_skip_length;
@@ -124,6 +126,7 @@ struct hantro_hevc_dec_ctrls {
* @tile_sao: Tile SAO buffer
* @tile_bsd: Tile BSD control buffer
* @ref_bufs: Internal reference buffers
+ * @scaling_lists: Scaling lists buffer
* @ref_bufs_poc: Internal reference buffers picture order count
* @ref_bufs_used: Bitfield of used reference buffers
* @ctrls: V4L2 controls attached to a run
@@ -135,6 +138,7 @@ struct hantro_hevc_dec_hw_ctx {
struct hantro_aux_buf tile_sao;
struct hantro_aux_buf tile_bsd;
struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
+ struct hantro_aux_buf scaling_lists;
int ref_bufs_poc[NUM_REF_PICTURES];
u32 ref_bufs_used;
struct hantro_hevc_dec_ctrls ctrls;
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index 2384f4c6b09d..06c94f20ecf8 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -27,8 +27,3 @@
- i.MX7: all of the above, since it uses the imx media core
- i.MX7: use Frame Interval Monitor
-
-- i.MX7: runtime testing with parallel sensor, links setup and streaming
-
-- i.MX7: runtime testing with different formats, for the time only 10-bit bayer
- is tested
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index bb1305c9daaf..1caa100be33d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -139,6 +139,8 @@ static inline bool is_parallel_16bit_bus(struct v4l2_fwnode_endpoint *ep)
* Check for conditions that require the IPU to handle the
* data internally as generic data, aka passthrough mode:
* - raw bayer media bus formats, or
+ * - BT.656 and BT.1120 (8/10-bit YUV422) data can always be processed
+ * on-the-fly
* - the CSI is receiving from a 16-bit parallel bus, or
* - the CSI is receiving from an 8-bit parallel bus and the incoming
* media bus format is other than UYVY8_2X8/YUYV8_2X8.
@@ -147,6 +149,9 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
struct v4l2_mbus_framefmt *infmt,
const struct imx_media_pixfmt *incc)
{
+ if (ep->bus_type == V4L2_MBUS_BT656) // including BT.1120
+ return 0;
+
return incc->bayer || is_parallel_16bit_bus(ep) ||
(is_parallel_bus(ep) &&
infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
@@ -1924,7 +1929,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
unsigned int port;
int ret;
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
/* get this CSI's port id */
ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
@@ -1935,8 +1940,8 @@ static int imx_csi_async_register(struct csi_priv *priv)
port, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &priv->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -1950,8 +1955,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
priv->notifier.ops = &csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&priv->sd,
- &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret)
return ret;
@@ -2040,8 +2044,8 @@ static int imx_csi_probe(struct platform_device *pdev)
return 0;
cleanup:
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
@@ -2055,8 +2059,8 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
@@ -2082,4 +2086,3 @@ module_platform_driver(imx_csi_driver);
MODULE_DESCRIPTION("i.MX CSI subdev driver");
MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-csi");
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index d186179388d0..80b69a9a752c 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -367,6 +367,8 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev,
imxmd->v4l2_dev.notify = imx_media_notify;
strscpy(imxmd->v4l2_dev.name, "imx-media",
sizeof(imxmd->v4l2_dev.name));
+ snprintf(imxmd->md.bus_info, sizeof(imxmd->md.bus_info),
+ "platform:%s", dev_name(imxmd->md.dev));
media_device_init(&imxmd->md);
@@ -379,7 +381,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev,
INIT_LIST_HEAD(&imxmd->vdev_list);
- v4l2_async_notifier_init(&imxmd->notifier);
+ v4l2_async_nf_init(&imxmd->notifier);
return imxmd;
@@ -403,11 +405,10 @@ int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
/* prepare the async subdev notifier and register it */
imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
- ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
- &imxmd->notifier);
+ ret = v4l2_async_nf_register(&imxmd->v4l2_dev, &imxmd->notifier);
if (ret) {
v4l2_err(&imxmd->v4l2_dev,
- "v4l2_async_notifier_register failed with %d\n", ret);
+ "v4l2_async_nf_register failed with %d\n", ret);
return ret;
}
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 338b8bd0bb07..f85462214e22 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -94,7 +94,7 @@ static int imx_media_probe(struct platform_device *pdev)
return 0;
cleanup:
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
@@ -113,9 +113,9 @@ static int imx_media_remove(struct platform_device *pdev)
imxmd->m2m_vdev = NULL;
}
- v4l2_async_notifier_unregister(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
imx_media_unregister_ipu_internal_subdevs(imxmd);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index b677cf0e0c84..59f1eb7b62bc 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -29,9 +29,9 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
}
/* add CSI fwnode to async notifier */
- asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
- of_fwnode_handle(csi_np),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
if (ret == -EEXIST)
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 9de0ebd439dc..a0941fc2907b 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -647,7 +647,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi2->notifier);
+ v4l2_async_nf_init(&csi2->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -663,8 +663,8 @@ static int csi2_async_register(struct csi2_dev *csi2)
dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi2->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd))
@@ -672,8 +672,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
csi2->notifier.ops = &csi2_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&csi2->sd,
- &csi2->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi2->sd, &csi2->notifier);
if (ret)
return ret;
@@ -768,8 +767,8 @@ static int csi2_probe(struct platform_device *pdev)
return 0;
clean_notifier:
- v4l2_async_notifier_unregister(&csi2->notifier);
- v4l2_async_notifier_cleanup(&csi2->notifier);
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
clk_disable_unprepare(csi2->dphy_clk);
pllref_off:
clk_disable_unprepare(csi2->pllref_clk);
@@ -783,8 +782,8 @@ static int csi2_remove(struct platform_device *pdev)
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csi2_dev *csi2 = sd_to_dev(sd);
- v4l2_async_notifier_unregister(&csi2->notifier);
- v4l2_async_notifier_cleanup(&csi2->notifier);
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
v4l2_async_unregister_subdev(sd);
clk_disable_unprepare(csi2->dphy_clk);
clk_disable_unprepare(csi2->pllref_clk);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 127183732912..2288dadb2683 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1099,13 +1099,13 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -1119,7 +1119,7 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
csi->notifier.ops = &imx7_csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&csi->sd, &csi->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi->sd, &csi->notifier);
if (ret)
return ret;
@@ -1210,12 +1210,12 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
subdev_notifier_cleanup:
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
cleanup:
- v4l2_async_notifier_unregister(&imxmd->notifier);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_unregister(&imxmd->md);
media_device_cleanup(&imxmd->md);
@@ -1232,15 +1232,15 @@ static int imx7_csi_remove(struct platform_device *pdev)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
struct imx_media_dev *imxmd = csi->imxmd;
- v4l2_async_notifier_unregister(&imxmd->notifier);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
v4l2_async_unregister_subdev(sd);
mutex_destroy(&csi->lock);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 41e33535de55..2b73fa55c938 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -1162,7 +1162,7 @@ static int mipi_csis_async_register(struct csi_state *state)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&state->notifier);
+ v4l2_async_nf_init(&state->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -1187,8 +1187,8 @@ static int mipi_csis_async_register(struct csi_state *state)
dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &state->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -1198,7 +1198,7 @@ static int mipi_csis_async_register(struct csi_state *state)
state->notifier.ops = &mipi_csis_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&state->sd, &state->notifier);
+ ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
if (ret)
return ret;
@@ -1429,8 +1429,8 @@ unregister_all:
mipi_csis_debugfs_exit(state);
cleanup:
media_entity_cleanup(&state->sd.entity);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
disable_clock:
mipi_csis_clk_disable(state);
@@ -1445,8 +1445,8 @@ static int mipi_csis_remove(struct platform_device *pdev)
struct csi_state *state = mipi_sd_to_csis_state(sd);
mipi_csis_debugfs_exit(state);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
index a6f562009b9a..7adbdd14daa9 100644
--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
@@ -643,7 +643,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&state->notifier);
+ v4l2_async_nf_init(&state->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -669,8 +669,8 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->bus.num_data_lanes,
state->bus.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&state->notifier,
- ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -680,7 +680,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->notifier.ops = &imx8mq_mipi_csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&state->sd, &state->notifier);
+ ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
if (ret)
return ret;
@@ -937,8 +937,8 @@ cleanup:
imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
media_entity_cleanup(&state->sd.entity);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
icc:
imx8mq_mipi_csi_release_icc(pdev);
@@ -953,8 +953,8 @@ static int imx8mq_mipi_csi_remove(struct platform_device *pdev)
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index fa3d6ee5adf2..585f55981c86 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -234,7 +234,9 @@ struct ipu3_uapi_ae_ccm {
* struct ipu3_uapi_ae_config - AE config
*
* @grid_cfg: config for auto exposure statistics grid. See struct
- * &ipu3_uapi_ae_grid_config
+ * &ipu3_uapi_ae_grid_config, as Imgu did not support output
+ * auto exposure statistics, so user can ignore this configuration
+ * and use the RGB table in auto-whitebalance statistics instead.
* @weights: &IPU3_UAPI_AE_WEIGHTS is based on 32x24 blocks in the grid.
* Each grid cell has a corresponding value in weights LUT called
* grid value, global histogram is updated based on grid value and
@@ -534,6 +536,9 @@ struct ipu3_uapi_ff_status {
*
* @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
* @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned
+ * current Imgu does not output the auto exposure statistics
+ * to ae_raw_buffer; users such as a 3A algorithm can use the
+ * RGB table in &ipu3_uapi_awb_raw_buffer to do auto exposure.
* @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
* @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
* @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
index 45aff76198e2..981693eed815 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.c
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -124,12 +124,11 @@ int imgu_css_fw_init(struct imgu_css *css)
/* Check and display fw header info */
css->fwp = (struct imgu_fw_header *)css->fw->data;
- if (css->fw->size < sizeof(struct imgu_fw_header *) ||
+ if (css->fw->size < struct_size(css->fwp, binary_header, 1) ||
css->fwp->file_header.h_size != sizeof(struct imgu_fw_bi_file_h))
goto bad_fw;
- if (sizeof(struct imgu_fw_bi_file_h) +
- css->fwp->file_header.binary_nr * sizeof(struct imgu_fw_info) >
- css->fw->size)
+ if (struct_size(css->fwp, binary_header,
+ css->fwp->file_header.binary_nr) > css->fw->size)
goto bad_fw;
dev_info(dev, "loaded firmware version %.64s, %u binaries, %zu bytes\n",
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.h b/drivers/staging/media/ipu3/ipu3-css-fw.h
index 3c078f15a295..c0bc57fd678a 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.h
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.h
@@ -171,7 +171,7 @@ struct imgu_fw_bi_file_h {
struct imgu_fw_header {
struct imgu_fw_bi_file_h file_header;
- struct imgu_fw_info binary_header[1]; /* binary_nr items */
+ struct imgu_fw_info binary_header[]; /* binary_nr items */
};
/******************* Firmware functions *******************/
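The two ipu3-css-fw hunks above pair a flexible-array conversion (binary_header[1] becomes binary_header[]) with struct_size()-based bounds checks on the firmware blob. A minimal, self-contained sketch of the same pattern, with hypothetical demo_* names rather than the driver's real types:

/* Hypothetical sketch, not driver code. struct_size() computes
 * sizeof(*hdr) + nr * sizeof(hdr->binary_header[0]) and saturates to
 * SIZE_MAX on overflow, so a huge binary_nr cannot wrap the arithmetic
 * the way the old open-coded multiplication could.
 */
#include <linux/overflow.h>
#include <linux/types.h>

struct demo_file_h { u32 binary_nr; };
struct demo_info   { u32 offset; u32 size; };

struct demo_header {
	struct demo_file_h file_header;
	struct demo_info   binary_header[];	/* binary_nr items */
};

static bool demo_fw_fits(const struct demo_header *hdr, size_t blob_size)
{
	return struct_size(hdr, binary_header,
			   hdr->file_header.binary_nr) <= blob_size;
}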
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 608dcacf12b2..8c70497d744c 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -5,6 +5,7 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
+#include "ipu3.h"
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-css-params.h"
@@ -53,7 +54,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
.osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
.osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
- .bytesperpixel_num = 1 * IPU3_CSS_FORMAT_BPP_DEN,
.chroma_decim = 4,
.width_align = IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
@@ -64,7 +64,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -73,7 +72,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -82,7 +80,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -91,7 +88,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
},
@@ -150,17 +146,8 @@ static int imgu_css_queue_init(struct imgu_css_queue *queue,
f->height = ALIGN(clamp_t(u32, f->height,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
- if (queue->css_fmt->frame_format != IMGU_ABI_FRAME_FORMAT_RAW_PACKED)
- f->plane_fmt[0].bytesperline = DIV_ROUND_UP(queue->width_pad *
- queue->css_fmt->bytesperpixel_num,
- IPU3_CSS_FORMAT_BPP_DEN);
- else
- /* For packed raw, alignment for bpl is by 50 to the width */
- f->plane_fmt[0].bytesperline =
- DIV_ROUND_UP(f->width,
- IPU3_CSS_FORMAT_BPP_DEN) *
- queue->css_fmt->bytesperpixel_num;
-
+ f->plane_fmt[0].bytesperline =
+ imgu_bytesperline(f->width, queue->css_fmt->frame_format);
sizeimage = f->height * f->plane_fmt[0].bytesperline;
if (queue->css_fmt->chroma_decim)
sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
index 6108a068b228..ab64e9521203 100644
--- a/drivers/staging/media/ipu3/ipu3-css.h
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -82,7 +82,6 @@ struct imgu_css_format {
enum imgu_abi_bayer_order bayer_order;
enum imgu_abi_osys_format osys_format;
enum imgu_abi_osys_tiling osys_tiling;
- u32 bytesperpixel_num; /* Bytes per pixel in first plane * 50 */
u8 bit_depth; /* Effective bits per pixel */
u8 chroma_decim; /* Chroma plane decimation, 0=no chroma plane */
u8 width_align; /* Alignment requirement for width_pad */
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 38a240764509..0473457b4e64 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -592,11 +592,12 @@ static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
static int imgu_vidioc_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct imgu_video_device *node = file_to_intel_imgu_node(file);
+ struct imgu_device *imgu = video_drvdata(file);
strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", node->name);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(imgu->pci_dev));
return 0;
}
@@ -696,7 +697,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
/* CSS expects some format on OUT queue */
if (i != IPU3_CSS_QUEUE_OUT &&
- !imgu_pipe->nodes[inode].enabled) {
+ !imgu_pipe->nodes[inode].enabled && !try) {
fmts[i] = NULL;
continue;
}
@@ -864,7 +865,7 @@ static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
/******************** function pointers ********************/
-static struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
+static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
.open = imgu_subdev_open,
};
@@ -1136,7 +1137,9 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
def_pix_fmt.height = def_bus_fmt.height;
def_pix_fmt.field = def_bus_fmt.field;
def_pix_fmt.num_planes = 1;
- def_pix_fmt.plane_fmt[0].bytesperline = def_pix_fmt.width * 2;
+ def_pix_fmt.plane_fmt[0].bytesperline =
+ imgu_bytesperline(def_pix_fmt.width,
+ IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
def_pix_fmt.plane_fmt[0].sizeimage =
def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
def_pix_fmt.flags = 0;
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
index eb46b527dd23..d2ad0a95c5aa 100644
--- a/drivers/staging/media/ipu3/ipu3.h
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -164,4 +164,16 @@ void imgu_v4l2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
int imgu_s_stream(struct imgu_device *imgu, int enable);
+static inline u32 imgu_bytesperline(const unsigned int width,
+ enum imgu_abi_frame_format frame_format)
+{
+ if (frame_format == IMGU_ABI_FRAME_FORMAT_NV12)
+ return ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS);
+ /*
+ * 64 bytes for every 50 pixels, the line length
+ * in bytes is multiple of 64 (line end alignment).
+ */
+ return DIV_ROUND_UP(width, 50) * 64;
+}
+
#endif
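The new imgu_bytesperline() helper centralizes the line-stride math that the css and v4l2 hunks now share: NV12 pads the width to the ISP vector size, while 10-bit packed raw stores 50 pixels (500 bits) per 64-byte unit. A standalone worked example, assuming IPU3_UAPI_ISP_VEC_ELEMS is 64 as in the mainline uapi header (hypothetical userspace harness, not driver code):

#include <stdio.h>

#define VEC_ELEMS		64
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN_UP(x, a)		(DIV_ROUND_UP(x, a) * (a))

static unsigned int demo_bytesperline(unsigned int width, int raw_packed)
{
	if (!raw_packed)		/* NV12: one byte per luma pixel */
		return ALIGN_UP(width, VEC_ELEMS);
	/* 10-bit Bayer: 50 pixels (500 bits) packed into a 64-byte unit */
	return DIV_ROUND_UP(width, 50) * 64;
}

int main(void)
{
	printf("%u\n", demo_bytesperline(4096, 1));	/* 82 units * 64 = 5248 */
	printf("%u\n", demo_bytesperline(1920, 0));	/* ALIGN_UP(1920, 64) = 1920 */
	return 0;
}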
diff --git a/drivers/staging/media/meson/vdec/esparser.h b/drivers/staging/media/meson/vdec/esparser.h
index ff51fe7fda66..9351e62c70e6 100644
--- a/drivers/staging/media/meson/vdec/esparser.h
+++ b/drivers/staging/media/meson/vdec/esparser.h
@@ -17,13 +17,17 @@ int esparser_power_up(struct amvdec_session *sess);
/**
* esparser_queue_eos() - write End Of Stream sequence to the ESPARSER
*
- * @core vdec core struct
+ * @core: vdec core struct
+ * @data: EOS sequence
+ * @len: length of EOS sequence
*/
int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len);
/**
* esparser_queue_all_src() - work handler that writes as many src buffers
* as possible to the ESPARSER
+ *
+ * @work: work struct
*/
void esparser_queue_all_src(struct work_struct *work);
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index e51d69c4729d..8549d95be0f2 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -994,7 +994,6 @@ static int vdec_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct video_device *vdev;
struct amvdec_core *core;
- struct resource *r;
const struct of_device_id *of_id;
int irq;
int ret;
@@ -1006,13 +1005,11 @@ static int vdec_probe(struct platform_device *pdev)
core->dev = dev;
platform_set_drvdata(pdev, core);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dos");
- core->dos_base = devm_ioremap_resource(dev, r);
+ core->dos_base = devm_platform_ioremap_resource_byname(pdev, "dos");
if (IS_ERR(core->dos_base))
return PTR_ERR(core->dos_base);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "esparser");
- core->esparser_base = devm_ioremap_resource(dev, r);
+ core->esparser_base = devm_platform_ioremap_resource_byname(pdev, "esparser");
if (IS_ERR(core->esparser_base))
return PTR_ERR(core->esparser_base);
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index f95445ac0658..0906b8fb5cc6 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -60,10 +60,12 @@ struct amvdec_session;
* @dos_clk: DOS clock
* @vdec_1_clk: VDEC_1 clock
* @vdec_hevc_clk: VDEC_HEVC clock
+ * @vdec_hevcf_clk: VDEC_HEVCF clock
* @esparser_reset: RESET for the PARSER
- * @vdec_dec: video device for the decoder
+ * @vdev_dec: video device for the decoder
* @v4l2_dev: v4l2 device
* @cur_sess: current decoding session
+ * @lock: video device lock
*/
struct amvdec_core {
void __iomem *dos_base;
@@ -88,7 +90,7 @@ struct amvdec_core {
struct v4l2_device v4l2_dev;
struct amvdec_session *cur_sess;
- struct mutex lock; /* video device lock */
+ struct mutex lock;
};
/**
@@ -120,6 +122,7 @@ struct amvdec_ops {
* @recycle: optional call to tell the codec to recycle a dst buffer. Must go
* in pair with @can_recycle
* @drain: optional call if the codec has a custom way of draining
+ * @resume: optional call to resume after a resolution change
* @eos_sequence: optional call to get an end sequence to send to esparser
* for flush. Mutually exclusive with @drain.
* @isr: mandatory call when the ISR triggers
@@ -185,6 +188,7 @@ enum amvdec_status {
* @m2m_ctx: v4l2 m2m context
* @ctrl_handler: V4L2 control handler
* @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+ * @lock: cap & out queues lock
* @fmt_out: vdec pixel format for the OUTPUT queue
* @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
* @src_buffer_size: size in bytes of the OUTPUT buffers' only plane
@@ -200,9 +204,12 @@ enum amvdec_status {
* @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue
* @sequence_cap: capture sequence counter
+ * @sequence_out: output sequence counter
* @should_stop: flag set if userspace signaled EOS via command
* or empty buffer
* @keyframe_found: flag set once a keyframe has been parsed
+ * @num_dst_bufs: number of destination buffers
+ * @changed_format: flag set when the format has changed
* @canvas_alloc: array of all the canvas IDs allocated
* @canvas_num: number of canvas IDs allocated
* @vififo_vaddr: virtual address for the VIFIFO
@@ -214,6 +221,9 @@ enum amvdec_status {
* @timestamps: chronological list of src timestamps
* @ts_spinlock: spinlock for the timestamps list
* @last_irq_jiffies: tracks last time the vdec triggered an IRQ
+ * @last_offset: tracks last offset of vififo
+ * @wrap_count: number of times the vififo wrapped around
+ * @fw_idx_to_vb2_idx: firmware buffer index to vb2 buffer index
* @status: current decoding status
* @priv: codec private data
*/
@@ -225,7 +235,7 @@ struct amvdec_session {
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *ctrl_min_buf_capture;
- struct mutex lock; /* cap & out queues lock */
+ struct mutex lock;
const struct amvdec_format *fmt_out;
u32 pixfmt_cap;
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index cfaed52ab526..88137d15aa3a 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -52,8 +52,9 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
*
* @sess: current session
* @ts: timestamp to add
+ * @tc: timecode to add
* @offset: offset in the VIFIFO where the associated packet was written
- * @flags the vb2_v4l2_buffer flags
+ * @flags: the vb2_v4l2_buffer flags
*/
void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
struct v4l2_timecode tc, u32 offset, u32 flags);
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 76e97cbe2512..951e19231da2 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1015,8 +1015,9 @@ static int rkvdec_h264_adjust_fmt(struct rkvdec_ctx *ctx,
struct v4l2_pix_format_mplane *fmt = &f->fmt.pix_mp;
fmt->num_planes = 1;
- fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height *
- RKVDEC_H264_MAX_DEPTH_IN_BYTES;
+ if (!fmt->plane_fmt[0].sizeimage)
+ fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height *
+ RKVDEC_H264_MAX_DEPTH_IN_BYTES;
return 0;
}
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 7131156c1f2c..4fd4a2907da7 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -280,31 +280,20 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
return 0;
}
-static int rkvdec_s_fmt(struct file *file, void *priv,
- struct v4l2_format *f,
- int (*try_fmt)(struct file *, void *,
- struct v4l2_format *))
+static int rkvdec_s_capture_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
struct vb2_queue *vq;
+ int ret;
- if (!try_fmt)
- return -EINVAL;
-
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ /* Change not allowed if queue is busy */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(vq))
return -EBUSY;
- return try_fmt(file, priv, f);
-}
-
-static int rkvdec_s_capture_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
- int ret;
-
- ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_capture_fmt);
+ ret = rkvdec_try_capture_fmt(file, priv, f);
if (ret)
return ret;
@@ -319,10 +308,21 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct rkvdec_coded_fmt_desc *desc;
struct v4l2_format *cap_fmt;
- struct vb2_queue *peer_vq;
+ struct vb2_queue *peer_vq, *vq;
int ret;
/*
+ * In order to support dynamic resolution change, the decoder admits
+ * a resolution change, as long as the pixelformat remains the same.
+ * This cannot be done while the queue is streaming.
+ */
+ vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_streaming(vq) ||
+ (vb2_is_busy(vq) &&
+ f->fmt.pix_mp.pixelformat != ctx->coded_fmt.fmt.pix_mp.pixelformat))
+ return -EBUSY;
+
+ /*
* Since format change on the OUTPUT queue will reset the CAPTURE
* queue, we can't allow doing so when the CAPTURE queue has buffers
* allocated.
@@ -331,7 +331,7 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
if (vb2_is_busy(peer_vq))
return -EBUSY;
- ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_output_fmt);
+ ret = rkvdec_try_output_fmt(file, priv, f);
if (ret)
return ret;
@@ -967,7 +967,6 @@ static const char * const rkvdec_clk_names[] = {
static int rkvdec_probe(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec;
- struct resource *res;
unsigned int i;
int ret, irq;
@@ -999,8 +998,7 @@ static int rkvdec_probe(struct platform_device *pdev)
*/
clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
+ rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkvdec->regs))
return PTR_ERR(rkvdec->regs);
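With the reworked rkvdec_s_output_fmt() above, an OUTPUT format change is now accepted while buffers are still allocated, provided the pixelformat is unchanged and the queue is not streaming; together with the rkvdec-h264 change that preserves a user-supplied sizeimage, this enables dynamic resolution changes. A hedged userspace sketch of such a sequence, using only standard V4L2 ioctls (error handling trimmed):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Adjust the coded resolution on the OUTPUT queue while keeping the
 * pixelformat; the driver returns -EBUSY if the queue is still streaming.
 */
static int demo_resize_coded_fmt(int fd, unsigned int w, unsigned int h)
{
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };

	if (ioctl(fd, VIDIOC_G_FMT, &fmt))
		return -1;
	fmt.fmt.pix_mp.width = w;	/* pixelformat left untouched */
	fmt.fmt.pix_mp.height = h;
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}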
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index c0d005dafc6c..c76fc97d97a0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -28,6 +28,50 @@
#include "cedrus_dec.h"
#include "cedrus_hw.h"
+static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+ struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl);
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+
+ if (ctx->dev->capabilities & CEDRUS_CAPABILITY_H265_10_DEC) {
+ if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
+ /* Only 8-bit and 10-bit are supported */
+ return -EINVAL;
+ } else {
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops cedrus_ctrl_ops = {
+ .try_ctrl = cedrus_try_ctrl,
+};
+
static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
@@ -62,6 +106,7 @@ static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SPS,
+ .ops = &cedrus_ctrl_ops,
},
.codec = CEDRUS_CODEC_H264,
},
@@ -120,6 +165,7 @@ static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ .ops = &cedrus_ctrl_ops,
},
.codec = CEDRUS_CODEC_H265,
},
@@ -137,6 +183,12 @@ static const struct cedrus_control cedrus_controls[] = {
},
{
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ },
+ {
+ .cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
.max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
.def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
@@ -207,6 +259,7 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
v4l2_ctrl_handler_free(hdl);
kfree(ctx->ctrls);
+ ctx->ctrls = NULL;
return hdl->error;
}
@@ -282,7 +335,7 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
- ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_NV12_32L32;
cedrus_prepare_format(&ctx->dst_fmt);
ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
/*
@@ -550,6 +603,7 @@ static const struct cedrus_variant sun50i_h6_cedrus_variant = {
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_H265_10_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 600000000,
};
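The cedrus_try_ctrl() hook added above validates H.264/HEVC SPS controls when they are set, so an unsupported stream (for example 4:2:2 chroma, or 10-bit on hardware without CEDRUS_CAPABILITY_H265_10_DEC) is rejected up front instead of failing at decode time. A hedged userspace sketch of how that rejection surfaces through the standard extended-controls ioctls (names beyond the uapi are hypothetical):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns -1 with errno == EINVAL when the driver's try_ctrl refuses the SPS. */
static int demo_try_h264_sps(int video_fd, const struct v4l2_ctrl_h264_sps *sps)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_H264_SPS,
		.size = sizeof(*sps),
		.ptr = (void *)sps,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(video_fd, VIDIOC_TRY_EXT_CTRLS, &ctrls);
}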
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 88afba17b78b..c345f2984041 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -32,6 +32,7 @@
#define CEDRUS_CAPABILITY_H264_DEC BIT(2)
#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
+#define CEDRUS_CAPABILITY_H265_10_DEC BIT(5)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
@@ -78,6 +79,7 @@ struct cedrus_h265_run {
const struct v4l2_ctrl_hevc_pps *pps;
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling_matrix;
};
struct cedrus_vp8_run {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 40e8c4123f76..a16c1422558f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -72,6 +72,8 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
run.h265.decode_params = cedrus_find_control_data(ctx,
V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
+ run.h265.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
break;
case V4L2_PIX_FMT_VP8_FRAME:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index de7442d4834d..b4173a8926d6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -520,6 +520,11 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
unsigned int mv_col_size;
int ret;
+ /*
+ * NOTE: All buffers allocated here are only used by HW, so we
+ * can add DMA_ATTR_NO_KERNEL_MAPPING flag when allocating them.
+ */
+
/* Formula for picture buffer size is taken from CedarX source. */
if (ctx->src_fmt.width > 2048)
@@ -538,23 +543,23 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- &ctx->codec.h264.pic_info_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ &ctx->codec.h264.pic_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.pic_info_buf)
return -ENOMEM;
/*
* That buffer is supposed to be 16kiB in size, and be aligned
- * on 16kiB as well. However, dma_alloc_coherent provides the
- * guarantee that we'll have a CPU and DMA address aligned on
- * the smallest page order that is greater to the requested
- * size, so we don't have to overallocate.
+ * on 16kiB as well. However, dma_alloc_attrs provides the
+ * guarantee that we'll have a DMA address aligned on the
+ * smallest page order that is greater than the requested size,
+ * so we don't have to overallocate.
*/
ctx->codec.h264.neighbor_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- &ctx->codec.h264.neighbor_info_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h264.neighbor_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.neighbor_info_buf) {
ret = -ENOMEM;
goto err_pic_buf;
@@ -582,10 +587,11 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
mv_col_size = field_size * 2 * CEDRUS_H264_FRAME_NUM;
ctx->codec.h264.mv_col_buf_size = mv_col_size;
- ctx->codec.h264.mv_col_buf = dma_alloc_coherent(dev->dev,
- ctx->codec.h264.mv_col_buf_size,
- &ctx->codec.h264.mv_col_buf_dma,
- GFP_KERNEL);
+ ctx->codec.h264.mv_col_buf =
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.mv_col_buf_size,
+ &ctx->codec.h264.mv_col_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.mv_col_buf) {
ret = -ENOMEM;
goto err_neighbor_buf;
@@ -600,10 +606,10 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.deblk_buf_size =
ALIGN(ctx->src_fmt.width, 32) * 12;
ctx->codec.h264.deblk_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h264.deblk_buf_size,
- &ctx->codec.h264.deblk_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.deblk_buf) {
ret = -ENOMEM;
goto err_mv_col_buf;
@@ -616,10 +622,10 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.intra_pred_buf_size =
ALIGN(ctx->src_fmt.width, 64) * 5 * 2;
ctx->codec.h264.intra_pred_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h264.intra_pred_buf_size,
- &ctx->codec.h264.intra_pred_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.intra_pred_buf) {
ret = -ENOMEM;
goto err_deblk_buf;
@@ -629,24 +635,28 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
return 0;
err_deblk_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
- ctx->codec.h264.deblk_buf,
- ctx->codec.h264.deblk_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_mv_col_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
- ctx->codec.h264.mv_col_buf,
- ctx->codec.h264.mv_col_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_neighbor_buf:
- dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h264.neighbor_info_buf,
- ctx->codec.h264.neighbor_info_buf_dma);
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_pic_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- ctx->codec.h264.pic_info_buf,
- ctx->codec.h264.pic_info_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
return ret;
}
@@ -654,23 +664,28 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
- dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
- ctx->codec.h264.mv_col_buf,
- ctx->codec.h264.mv_col_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h264.neighbor_info_buf,
- ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- ctx->codec.h264.pic_info_buf,
- ctx->codec.h264.pic_info_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.deblk_buf_size)
- dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
- ctx->codec.h264.deblk_buf,
- ctx->codec.h264.deblk_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.intra_pred_buf_size)
- dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
- ctx->codec.h264.intra_pred_buf,
- ctx->codec.h264.intra_pred_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
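The conversions above switch device-only buffers from dma_alloc_coherent() to dma_alloc_attrs() with DMA_ATTR_NO_KERNEL_MAPPING: since the CPU never touches these buffers, the kernel virtual mapping can be skipped, and the returned pointer becomes an opaque cookie. A minimal sketch of the alloc/free pairing in a hypothetical driver context:

#include <linux/dma-mapping.h>

/* The returned cookie must only be passed back to dma_free_attrs(),
 * never dereferenced by the CPU.
 */
static void *demo_alloc_hw_only(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING);
}

static void demo_free_hw_only(struct device *dev, size_t size,
			      void *cookie, dma_addr_t dma)
{
	dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);
}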
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index ef0311a16d01..8829a7bab07e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -238,6 +238,69 @@ static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
}
}
+static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
+ struct cedrus_dev *dev = ctx->dev;
+ u32 i, j, k, val;
+
+ scaling = run->h265.scaling_matrix;
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF0,
+ (scaling->scaling_list_dc_coef_32x32[1] << 24) |
+ (scaling->scaling_list_dc_coef_32x32[0] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[1] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[0] << 0));
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF1,
+ (scaling->scaling_list_dc_coef_16x16[5] << 24) |
+ (scaling->scaling_list_dc_coef_16x16[4] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[3] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[2] << 0));
+
+ cedrus_h265_sram_write_offset(dev, VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS);
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_8x8[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_8x8[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_32x32[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_32x32[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_16x16[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_16x16[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++) {
+ val = ((u32)scaling->scaling_list_4x4[i][j + 12] << 24) |
+ ((u32)scaling->scaling_list_4x4[i][j + 8] << 16) |
+ ((u32)scaling->scaling_list_4x4[i][j + 4] << 8) |
+ scaling->scaling_list_4x4[i][j];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+}
+
static void cedrus_h265_setup(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -287,11 +350,12 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
ctx->codec.h265.mv_col_buf_size = num_buffers *
ctx->codec.h265.mv_col_buf_unit_size;
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
ctx->codec.h265.mv_col_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h265.mv_col_buf_size,
- &ctx->codec.h265.mv_col_buf_addr,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.mv_col_buf) {
ctx->codec.h265.mv_col_buf_size = 0;
// TODO: Abort the process here.
@@ -527,7 +591,12 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Scaling list. */
- reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED) {
+ cedrus_h265_write_scaling_list(ctx, run);
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED;
+ } else {
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ }
cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
/* Neighbor information address. */
@@ -599,10 +668,11 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
/* The buffer size is calculated at setup time. */
ctx->codec.h265.mv_col_buf_size = 0;
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
ctx->codec.h265.neighbor_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
- &ctx->codec.h265.neighbor_info_buf_addr,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.neighbor_info_buf)
return -ENOMEM;
@@ -614,16 +684,18 @@ static void cedrus_h265_stop(struct cedrus_ctx *ctx)
struct cedrus_dev *dev = ctx->dev;
if (ctx->codec.h265.mv_col_buf_size > 0) {
- dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
- ctx->codec.h265.mv_col_buf,
- ctx->codec.h265.mv_col_buf_addr);
+ dma_free_attrs(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
ctx->codec.h265.mv_col_buf_size = 0;
}
- dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h265.neighbor_info_buf,
- ctx->codec.h265.neighbor_info_buf_addr);
+ dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
}
static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
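cedrus_h265_write_scaling_list() above streams the HEVC scaling lists into decoder SRAM, packing four 8-bit coefficients per 32-bit write with the lowest-index coefficient in the least significant byte. A small sketch of that packing, with a concrete value for reference:

#include <linux/types.h>

/* pack_coefs(16, 17, 18, 19) == 0x13121110 */
static u32 demo_pack_coefs(u8 c0, u8 c1, u8 c2, u8 c3)
{
	return ((u32)c3 << 24) | ((u32)c2 << 16) | ((u32)c1 << 8) | c0;
}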
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index e2f2ff609c7e..2d7663726467 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -99,7 +99,7 @@ void cedrus_dst_format_set(struct cedrus_dev *dev,
cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ case V4L2_PIX_FMT_NV12_32L32:
default:
reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 92ace87c1c7d..bdb062ad8682 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -494,6 +494,8 @@
#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF0 (VE_ENGINE_DEC_H265 + 0x78)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF1 (VE_ENGINE_DEC_H265 + 0x7c)
#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 825af5fd35e0..33726175d980 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -56,7 +56,7 @@ static struct cedrus_format cedrus_formats[] = {
.capabilities = CEDRUS_CAPABILITY_VP8_DEC,
},
{
- .pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
+ .pixelformat = V4L2_PIX_FMT_NV12_32L32,
.directions = CEDRUS_DECODE_DST,
},
{
@@ -124,7 +124,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ case V4L2_PIX_FMT_NV12_32L32:
/* 32-aligned stride. */
bytesperline = ALIGN(width, 32);
@@ -568,9 +568,9 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
- src_vq->min_buffers_needed = 1;
src_vq->ops = &cedrus_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
@@ -587,7 +587,6 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
- dst_vq->min_buffers_needed = 1;
dst_vq->ops = &cedrus_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index d321790b07d9..69d9787d5338 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1272,7 +1272,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
}
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- v4l2_async_notifier_init(&chan->notifier);
+ v4l2_async_nf_init(&chan->notifier);
return 0;
@@ -1811,8 +1811,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
continue;
}
- tvge = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier, remote,
- struct tegra_vi_graph_entity);
+ tvge = v4l2_async_nf_add_fwnode(&chan->notifier, remote,
+ struct tegra_vi_graph_entity);
if (IS_ERR(tvge)) {
ret = PTR_ERR(tvge);
dev_err(vi->dev,
@@ -1834,7 +1834,7 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
cleanup:
dev_err(vi->dev, "failed parsing the graph: %d\n", ret);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
of_node_put(node);
return ret;
}
@@ -1868,13 +1868,12 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
continue;
chan->notifier.ops = &tegra_vi_async_ops;
- ret = v4l2_async_notifier_register(&vid->v4l2_dev,
- &chan->notifier);
+ ret = v4l2_async_nf_register(&vid->v4l2_dev, &chan->notifier);
if (ret < 0) {
dev_err(vi->dev,
"failed to register channel %d notifier: %d\n",
chan->portnos[0], ret);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
}
}
@@ -1887,8 +1886,8 @@ static void tegra_vi_graph_cleanup(struct tegra_vi *vi)
list_for_each_entry(chan, &vi->vi_chans, list) {
vb2_video_unregister_device(&chan->video);
- v4l2_async_notifier_unregister(&chan->notifier);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_unregister(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
}
}
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index bc5b030e9c40..9204dd42f319 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -185,7 +185,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct {
int command;
@@ -198,7 +198,7 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
#ifdef CONFIG_88EU_AP_MODE
struct {
@@ -210,7 +210,7 @@ struct ieee_param {
} add_sta;
struct {
u8 reserved[2];/* for set max_num_sta */
- u8 buf[0];
+ u8 buf[];
} bcn_ie;
#endif
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 61eff7c5746b..65ceaca9b51e 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -78,7 +78,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct {
int command;
@@ -91,7 +91,7 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
} u;
};
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index d6236f5b069d..c11d7e2d2347 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -172,7 +172,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct{
int command;
@@ -185,7 +185,7 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
struct {
u16 aid;
@@ -196,7 +196,7 @@ struct ieee_param {
} add_sta;
struct {
u8 reserved[2];/* for set max_num_sta */
- u8 buf[0];
+ u8 buf[];
} bcn_ie;
} u;
};
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index ef4a8e189fba..8190b840065f 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -244,7 +245,7 @@ struct target_core_file_cmd {
struct bio_vec bvecs[];
};
-static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct target_core_file_cmd *cmd;
@@ -302,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
ret = call_read_iter(file, &aio_cmd->iocb, &iter);
if (ret != -EIOCBQUEUED)
- cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+ cmd_rw_aio_complete(&aio_cmd->iocb, ret);
return 0;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4069a1edcfa3..b1ef041cacd8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -16,12 +16,14 @@
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -230,9 +232,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
struct block_device *bd,
struct request_queue *q)
{
- unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
- bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
+ unsigned long long blocks_long =
+ div_u64(bdev_nr_bytes(bd), block_size) - 1;
if (block_size == dev->dev_attrib.block_size)
return blocks_long;
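The hunk above derives the READ CAPACITY-style block count from bdev_nr_bytes() instead of reading the inode size directly; the last addressable LBA is the byte size divided by the logical block size, minus one. A short sketch of that arithmetic, with hypothetical names:

#include <linux/math64.h>

/* e.g. a 1 GiB device with 512-byte blocks has 2097152 blocks,
 * so the last addressable LBA is 2097151.
 */
static unsigned long long demo_last_lba(u64 nr_bytes, u32 block_size)
{
	return div_u64(nr_bytes, block_size) - 1;
}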
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 75ef52f008ff..7fa57fb57bf2 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -980,11 +980,10 @@ pscsi_execute_cmd(struct se_cmd *cmd)
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- req = blk_get_request(pdv->pdv_sd->request_queue,
+ req = scsi_alloc_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
@@ -1012,7 +1011,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
return 0;
fail_put_request:
- blk_put_request(req);
+ blk_mq_free_request(req);
fail:
kfree(pt);
return ret;
@@ -1067,7 +1066,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
break;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
kfree(pt);
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8260f38025b7..e20c19a0f106 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
kthread_unuse_mm(io_data->mm);
}
- io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+ io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 539220d7f5b6..78be94750232 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -469,7 +469,7 @@ static void ep_user_copy_worker(struct work_struct *work)
ret = -EFAULT;
/* completing the iocb can drop the ctx and mm, don't touch mm after */
- iocb->ki_complete(iocb, ret, ret);
+ iocb->ki_complete(iocb, ret);
kfree(priv->buf);
kfree(priv->to_free);
@@ -496,11 +496,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
kfree(priv->to_free);
kfree(priv);
iocb->private = NULL;
- /* aio_complete() reports bytes-transferred _and_ faults */
-
iocb->ki_complete(iocb,
- req->actual ? req->actual : (long)req->status,
- req->status);
+ req->actual ? req->actual : (long)req->status);
} else {
/* ep_copy_to_user() won't report both; we hide some faults */
if (unlikely(0 != req->status))
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 26e3d90d1e7c..841667a896dd 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -80,6 +80,7 @@ struct vduse_dev {
struct vdpa_callback config_cb;
struct work_struct inject;
spinlock_t irq_lock;
+ struct rw_semaphore rwsem;
int minor;
bool broken;
bool connected;
@@ -410,6 +411,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
if (domain->bounce_map)
vduse_domain_reset_bounce_map(domain);
+ down_write(&dev->rwsem);
+
dev->status = 0;
dev->driver_features = 0;
dev->generation++;
@@ -443,6 +446,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
flush_work(&vq->inject);
flush_work(&vq->kick);
}
+
+ up_write(&dev->rwsem);
}
static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
@@ -885,6 +890,23 @@ static void vduse_vq_irq_inject(struct work_struct *work)
spin_unlock_irq(&vq->irq_lock);
}
+static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+ struct work_struct *irq_work)
+{
+ int ret = -EINVAL;
+
+ down_read(&dev->rwsem);
+ if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto unlock;
+
+ ret = 0;
+ queue_work(vduse_irq_wq, irq_work);
+unlock:
+ up_read(&dev->rwsem);
+
+ return ret;
+}
+
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -966,8 +988,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
break;
}
case VDUSE_DEV_INJECT_CONFIG_IRQ:
- ret = 0;
- queue_work(vduse_irq_wq, &dev->inject);
+ ret = vduse_dev_queue_irq_work(dev, &dev->inject);
break;
case VDUSE_VQ_SETUP: {
struct vduse_vq_config config;
@@ -1053,9 +1074,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
if (index >= dev->vq_num)
break;
- ret = 0;
index = array_index_nospec(index, dev->vq_num);
- queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+ ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
default:
@@ -1136,6 +1156,7 @@ static struct vduse_dev *vduse_dev_create(void)
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
spin_lock_init(&dev->irq_lock);
+ init_rwsem(&dev->rwsem);
INIT_WORK(&dev->inject, vduse_dev_irq_inject);
init_waitqueue_head(&dev->waitq);
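The new rw_semaphore above serializes interrupt injection against device reset: injection paths take the lock for read and re-check VIRTIO_CONFIG_S_DRIVER_OK before queueing work, while vduse_dev_reset() holds it for write. A minimal sketch of the same reader/writer pattern with hypothetical demo_* names:

#include <linux/rwsem.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct rw_semaphore rwsem;
	bool driver_ok;
	struct workqueue_struct *irq_wq;
};

/* Readers re-check device state under the lock before queueing work. */
static int demo_queue_irq(struct demo_dev *dev, struct work_struct *work)
{
	int ret = -EINVAL;

	down_read(&dev->rwsem);
	if (dev->driver_ok) {
		queue_work(dev->irq_wq, work);
		ret = 0;
	}
	up_read(&dev->rwsem);
	return ret;
}

/* The writer (reset) excludes all injection while state is torn down. */
static void demo_reset(struct demo_dev *dev)
{
	down_write(&dev->rwsem);
	dev->driver_ok = false;
	up_write(&dev->rwsem);
}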
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index dd95dfd85e98..3035bb6f5458 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
if (!indirect && vq->use_dma_api)
- vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+ vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
if (indirect) {
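The one-character virtio_ring fix above turns an assignment into a masked clear, so clearing VRING_DESC_F_NEXT no longer wipes out the other flags stored in desc_extra. A tiny illustration of the difference, with hypothetical flag names:

#include <linux/types.h>

#define DEMO_F_NEXT	0x1
#define DEMO_F_WRITE	0x2

/* demo_clear_next(DEMO_F_NEXT | DEMO_F_WRITE) == DEMO_F_WRITE */
static u16 demo_clear_next(u16 flags)
{
	/* before the fix: flags = ~DEMO_F_NEXT;  -- DEMO_F_WRITE lost   */
	/* after the fix:  flags &= ~DEMO_F_NEXT; -- only F_NEXT cleared */
	flags &= (u16)~DEMO_F_NEXT;
	return flags;
}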
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 643c6c2d0b72..ced2fc0deb8c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -71,8 +71,6 @@
#define TCOBASE(p) ((p)->tco_res->start)
/* SMI Control and Enable Register */
#define SMI_EN(p) ((p)->smi_res->start)
-#define TCO_EN (1 << 13)
-#define GBL_SMI_EN (1 << 0)
#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
tmrval = seconds_to_ticks(p, t);
- /*
- * If TCO SMIs are off, the timer counts down twice before rebooting.
- * Otherwise, the BIOS generally reboots when the SMI triggers.
- */
- if (p->smi_res &&
- (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+ /* For TCO v1 the timer counts down twice before rebooting */
+ if (p->iTCO_version == 1)
tmrval /= 2;
/* from the specs: */
@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Disables TCO logic generating an SMI#
*/
val32 = inl(SMI_EN(p));
- val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
+ val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN(p));
}
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 2693ffb24ac7..31b03fa71341 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -119,7 +119,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
if (!iwdt)
return -ENOMEM;
- iwdt->base = dev->platform_data;
+ iwdt->base = (void __iomem *)dev->platform_data;
/*
* Retrieve rate from a fixed clock from the device tree if
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 1616f93dfad7..74d785b2b478 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -268,8 +268,12 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.bootstatus = WDIOF_CARDRESET;
}
- if (!early_enable)
+ if (early_enable) {
+ omap_wdt_start(&wdev->wdog);
+ set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+ } else {
omap_wdt_disable(wdev);
+ }
ret = watchdog_register_device(&wdev->wdog);
if (ret) {
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ee9ff38929eb..9791c74aebd4 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
return readl(gwdt->control_base + SBSA_GWDT_WOR);
else
- return readq(gwdt->control_base + SBSA_GWDT_WOR);
+ return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
}
static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
@@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
else
- writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
+ lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
}
/*
@@ -411,4 +411,3 @@ MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);