Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--   drivers/media/v4l2-core/Kconfig                  |  104
-rw-r--r--   drivers/media/v4l2-core/Makefile                 |   60
-rw-r--r--   drivers/media/v4l2-core/tuner-core.c             |  273
-rw-r--r--   drivers/media/v4l2-core/v4l2-async.c             |  986
-rw-r--r--   drivers/media/v4l2-core/v4l2-cci.c               |  203
-rw-r--r--   drivers/media/v4l2-core/v4l2-clk.c               |  242
-rw-r--r--   drivers/media/v4l2-core/v4l2-common.c            | 1330
-rw-r--r--   drivers/media/v4l2-core/v4l2-compat-ioctl32.c    | 1623
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls-api.c         | 1361
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls-core.c        | 2802
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls-defs.c        | 1685
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls-priv.h        |   95
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls-request.c     |  501
-rw-r--r--   drivers/media/v4l2-core/v4l2-ctrls.c             | 2928
-rw-r--r--   drivers/media/v4l2-core/v4l2-dev.c               |  735
-rw-r--r--   drivers/media/v4l2-core/v4l2-device.c            |  160
-rw-r--r--   drivers/media/v4l2-core/v4l2-dv-timings.c        | 1275
-rw-r--r--   drivers/media/v4l2-core/v4l2-event.c             |  169
-rw-r--r--   drivers/media/v4l2-core/v4l2-fh.c                |   47
-rw-r--r--   drivers/media/v4l2-core/v4l2-flash-led-class.c   |  746
-rw-r--r--   drivers/media/v4l2-core/v4l2-fwnode.c            | 1299
-rw-r--r--   drivers/media/v4l2-core/v4l2-h264.c              |  453
-rw-r--r--   drivers/media/v4l2-core/v4l2-i2c.c               |  185
-rw-r--r--   drivers/media/v4l2-core/v4l2-int-device.c        |  164
-rw-r--r--   drivers/media/v4l2-core/v4l2-ioctl.c             | 2660
-rw-r--r--   drivers/media/v4l2-core/v4l2-isp.c               |  132
-rw-r--r--   drivers/media/v4l2-core/v4l2-jpeg.c              |  713
-rw-r--r--   drivers/media/v4l2-core/v4l2-mc.c                |  614
-rw-r--r--   drivers/media/v4l2-core/v4l2-mem2mem.c           | 1435
-rw-r--r--   drivers/media/v4l2-core/v4l2-of.c                |  266
-rw-r--r--   drivers/media/v4l2-core/v4l2-spi.c               |   78
-rw-r--r--   drivers/media/v4l2-core/v4l2-subdev-priv.h       |   14
-rw-r--r--   drivers/media/v4l2-core/v4l2-subdev.c            | 2487
-rw-r--r--   drivers/media/v4l2-core/v4l2-trace.c             |   12
-rw-r--r--   drivers/media/v4l2-core/v4l2-vp9.c               | 1850
-rw-r--r--   drivers/media/v4l2-core/videobuf-core.c          | 1193
-rw-r--r--   drivers/media/v4l2-core/videobuf-dma-contig.c    |  405
-rw-r--r--   drivers/media/v4l2-core/videobuf-dma-sg.c        |  635
-rw-r--r--   drivers/media/v4l2-core/videobuf-dvb.c           |  398
-rw-r--r--   drivers/media/v4l2-core/videobuf-vmalloc.c       |  351
-rw-r--r--   drivers/media/v4l2-core/videobuf2-core.c         | 2710
-rw-r--r--   drivers/media/v4l2-core/videobuf2-dma-contig.c   |  774
-rw-r--r--   drivers/media/v4l2-core/videobuf2-dma-sg.c       |  294
-rw-r--r--   drivers/media/v4l2-core/videobuf2-memops.c       |  187
-rw-r--r--   drivers/media/v4l2-core/videobuf2-vmalloc.c      |  279
45 files changed, 22708 insertions, 14205 deletions
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 8c05565a240e..d50ccac9733c 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -1,25 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Generic video config states
#
-# Enable the V4L2 core and API
-config VIDEO_V4L2
- tristate
- depends on (I2C || I2C=n) && VIDEO_DEV
- default (I2C || I2C=n) && VIDEO_DEV
+config VIDEO_V4L2_I2C
+ bool
+ depends on I2C && VIDEO_DEV
+ default y
+
+config VIDEO_V4L2_SUBDEV_API
+ bool
+ depends on VIDEO_DEV && MEDIA_CONTROLLER
+ help
+ Enables the V4L2 sub-device pad-level userspace API used to configure
+ video format, size and frame rate between hardware blocks.
+
+ This API is mostly used by camera interfaces in embedded platforms.
config VIDEO_ADV_DEBUG
bool "Enable advanced debug functionality on V4L2 drivers"
- default n
- ---help---
+ help
Say Y here to enable advanced debugging functionality on some
V4L devices.
In doubt, say N.
config VIDEO_FIXED_MINOR_RANGES
bool "Enable old-style fixed minor ranges on drivers/video devices"
- default n
- ---help---
+ help
Say Y here to enable the old-style fixed-range minor assignments.
Only useful if you rely on the old behavior and use mknod instead of udev.
@@ -28,69 +35,54 @@ config VIDEO_FIXED_MINOR_RANGES
# Used by drivers that need tuner.ko
config VIDEO_TUNER
tristate
- depends on MEDIA_TUNER
-# Used by drivers that need v4l2-mem2mem.ko
-config V4L2_MEM2MEM_DEV
- tristate
- depends on VIDEOBUF2_CORE
-
-# Used by drivers that need Videobuf modules
-config VIDEOBUF_GEN
+# Used by drivers that need v4l2-jpeg.ko
+config V4L2_JPEG_HELPER
tristate
-config VIDEOBUF_DMA_SG
+# Used by drivers that need v4l2-h264.ko
+config V4L2_H264
tristate
- depends on HAS_DMA
- select VIDEOBUF_GEN
-config VIDEOBUF_VMALLOC
+# Used by drivers that need v4l2-vp9.ko
+config V4L2_VP9
tristate
- select VIDEOBUF_GEN
-config VIDEOBUF_DMA_CONTIG
+# Used by drivers that need v4l2-mem2mem.ko
+config V4L2_MEM2MEM_DEV
tristate
- depends on HAS_DMA
- select VIDEOBUF_GEN
+ depends on VIDEOBUF2_CORE
+
+# Used by LED subsystem flash drivers
+config V4L2_FLASH_LED_CLASS
+ tristate "V4L2 flash API for LED flash class devices"
+ depends on VIDEO_DEV
+ depends on LEDS_CLASS_FLASH
+ select MEDIA_CONTROLLER
+ select V4L2_ASYNC
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Say Y here to enable V4L2 flash API support for LED flash
+ class drivers.
-config VIDEOBUF_DVB
- tristate
- select VIDEOBUF_GEN
+ When in doubt, say N.
-# Used by drivers that need Videobuf2 modules
-config VIDEOBUF2_CORE
- select DMA_SHARED_BUFFER
+config V4L2_FWNODE
tristate
+ select V4L2_ASYNC
-config VIDEOBUF2_MEMOPS
+config V4L2_ASYNC
tristate
-config VIDEOBUF2_DMA_CONTIG
+config V4L2_CCI
tristate
- depends on HAS_DMA
- select VIDEOBUF2_CORE
- select VIDEOBUF2_MEMOPS
- select DMA_SHARED_BUFFER
-config VIDEOBUF2_VMALLOC
+config V4L2_CCI_I2C
tristate
- select VIDEOBUF2_CORE
- select VIDEOBUF2_MEMOPS
- select DMA_SHARED_BUFFER
+ depends on I2C
+ select REGMAP_I2C
+ select V4L2_CCI
-config VIDEOBUF2_DMA_SG
+config V4L2_ISP
tristate
- #depends on HAS_DMA
- select VIDEOBUF2_CORE
- select VIDEOBUF2_MEMOPS
-
-config VIDEO_V4L2_INT_DEVICE
- tristate "V4L2 int device (DEPRECATED)"
- depends on VIDEO_V4L2
- ---help---
- An early framework for a hardware-independent interface for
- image sensors and bridges etc. Currently used by omap24xxcam and
- tcm825x drivers that should be converted to V4L2 subdev.
-
- Do not use for new developments.
-
+ depends on VIDEOBUF2_CORE
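The hidden helper symbols above are selected by drivers rather than enabled by users; VIDEO_V4L2_SUBDEV_API is the one option with userspace-visible behaviour, gating the pad-level API described in its help text. A minimal sketch (not part of this patch; the foo_* names are placeholders) of how a bridge driver typically opts a sub-device into that API:

/* Sketch only: a hypothetical bridge driver exposing a subdev node. */
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

static int foo_register_sensor(struct v4l2_device *v4l2_dev,
			       struct v4l2_subdev *sensor_sd)
{
	int ret;

	/* Request a /dev/v4l-subdevN node for this sub-device. */
	sensor_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	ret = v4l2_device_register_subdev(v4l2_dev, sensor_sd);
	if (ret)
		return ret;

	/*
	 * Creates the sub-device nodes only when
	 * CONFIG_VIDEO_V4L2_SUBDEV_API is enabled; otherwise this call is
	 * a stub returning 0 and no pad-level ioctls reach userspace.
	 */
	return v4l2_device_register_subdev_nodes(v4l2_dev);
}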
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 4c33b8d6520c..329f0eadce99 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -1,40 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the V4L2 core
#
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/tuners
+
tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
- v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
- v4l2-async.o
-ifeq ($(CONFIG_COMPAT),y)
- videodev-objs += v4l2-compat-ioctl32.o
-endif
-ifeq ($(CONFIG_OF),y)
- videodev-objs += v4l2-of.o
-endif
-
-obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
-obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
-
-obj-$(CONFIG_VIDEO_TUNER) += tuner.o
-
+ v4l2-event.o v4l2-subdev.o v4l2-common.o \
+ v4l2-ctrls-core.o v4l2-ctrls-api.o \
+ v4l2-ctrls-request.o v4l2-ctrls-defs.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
+videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
+videodev-$(CONFIG_SPI) += v4l2-spi.o
+videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
+videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+
+obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
+obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o
+obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
+obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
+obj-$(CONFIG_V4L2_ISP) += v4l2-isp.o
+obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
-obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
-obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
-obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
-obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
-obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
-
-obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
-obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
-obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
-obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
-obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
-
-ccflags-y += -I$(srctree)/drivers/media/dvb-core
-ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
-ccflags-y += -I$(srctree)/drivers/media/tuners
-
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
+obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index ddc9379eb276..5687089bea6e 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* i2c tv tuner chip device driver
* core core, i.e. kernel interfaces, registering and so on
@@ -34,7 +35,7 @@
#include "tda8290.h"
#include "tea5761.h"
#include "tea5767.h"
-#include "tuner-xc2028.h"
+#include "xc2028.h"
#include "tuner-simple.h"
#include "tda9887.h"
#include "xc5000.h"
@@ -43,8 +44,6 @@
#define UNSET (-1U)
-#define PREFIX (t->i2c->driver->driver.name)
-
/*
* Driver modprobe parameters
*/
@@ -84,34 +83,67 @@ static const struct v4l2_subdev_ops tuner_ops;
* Debug macros
*/
-#define tuner_warn(fmt, arg...) do { \
- printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_dbg(fmt, arg...) do { \
- if (tuner_debug) \
- printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
+#undef pr_fmt
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \
+ i2c_adapter_id(t->i2c->adapter), t->i2c->addr
+
+
+#define dprintk(fmt, arg...) do { \
+ if (tuner_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \
+} while (0)
/*
- * Internal struct used inside the driver
+ * Internal enums/struct used inside the driver
+ */
+
+/**
+ * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
+ *
+ * @TUNER_PAD_RF_INPUT:
+ * Radiofrequency (RF) sink pad, usually linked to an RF connector entity.
+ * @TUNER_PAD_OUTPUT:
+ * tuner video output source pad. Contains the video chrominance
+ * and luminance or the hole bandwidth of the signal converted to
+ * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners).
+ * @TUNER_PAD_AUD_OUT:
+ * Tuner audio output source pad. Tuners used to decode analog TV
+ * signals have an extra pad for audio output. Old tuners use an
+ * analog stage with a saw filter for the audio IF frequency. The
+ * output of the pad is, in this case, the audio IF, with should be
+ * decoded either by the bridge chipset (that's the case of cx2388x
+ * chipsets) or may require an external IF sound processor, like
+ * msp34xx. On modern silicon tuners, the audio IF decoder is usually
+ * incorporated at the tuner. On such case, the output of this pad
+ * is an audio sampled data.
+ * @TUNER_NUM_PADS:
+ * Number of pads of the tuner.
*/
+enum tuner_pad_index {
+ TUNER_PAD_RF_INPUT,
+ TUNER_PAD_OUTPUT,
+ TUNER_PAD_AUD_OUT,
+ TUNER_NUM_PADS
+};
+
+/**
+ * enum if_vid_dec_pad_index - video IF-PLL pad index
+ * for MEDIA_ENT_F_IF_VID_DECODER
+ *
+ * @IF_VID_DEC_PAD_IF_INPUT:
+ * video Intermediate Frequency (IF) sink pad
+ * @IF_VID_DEC_PAD_OUT:
+ * IF-PLL video output source pad. Contains the video chrominance
+ * and luminance IF signals.
+ * @IF_VID_DEC_PAD_NUM_PADS:
+ * Number of pads of the video IF-PLL.
+ */
+enum if_vid_dec_pad_index {
+ IF_VID_DEC_PAD_IF_INPUT,
+ IF_VID_DEC_PAD_OUT,
+ IF_VID_DEC_PAD_NUM_PADS
+};
struct tuner {
/* device */
@@ -134,6 +166,10 @@ struct tuner {
unsigned int type; /* chip type id */
void *config;
const char *name;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad[TUNER_NUM_PADS];
+#endif
};
/*
@@ -204,7 +240,7 @@ static void fe_set_params(struct dvb_frontend *fe,
struct tuner *t = fe->analog_demod_priv;
if (NULL == fe_tuner_ops->set_analog_params) {
- tuner_warn("Tuner frontend module has no way to set freq\n");
+ pr_warn("Tuner frontend module has no way to set freq\n");
return;
}
fe_tuner_ops->set_analog_params(fe, params);
@@ -226,7 +262,7 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (fe_tuner_ops->set_config)
return fe_tuner_ops->set_config(fe, priv_cfg);
- tuner_warn("Tuner frontend module has no way to set config\n");
+ pr_warn("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -247,15 +283,15 @@ static const struct analog_demod_ops tuner_analog_ops = {
/**
* set_type - Sets the tuner type for a given device
*
- * @c: i2c_client descriptoy
+ * @c: i2c_client descriptor
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
* @new_config: an optional parameter used by a few tuners to adjust
- internal parameters, like LNA mode
+ * internal parameters, like LNA mode
* @tuner_callback: an optional function to be called when switching
* to analog mode
*
- * This function applys the tuner config to tuner specified
+ * This function applies the tuner config to tuner specified
* by tun_setup structure. It contains several per-tuner initialization "magic"
*/
static void set_type(struct i2c_client *c, unsigned int type,
@@ -269,14 +305,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
int tune_now = 1;
if (type == UNSET || type == TUNER_ABSENT) {
- tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
+ dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
return;
}
t->type = type;
t->config = new_config;
if (tuner_callback != NULL) {
- tuner_dbg("defining GPIO callback\n");
+ dprintk("defining GPIO callback\n");
t->fe.callback = tuner_callback;
}
@@ -434,7 +470,11 @@ static void set_type(struct i2c_client *c, unsigned int type,
t->name = analog_ops->info.name;
}
- tuner_dbg("type set to %s\n", t->name);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ t->sd.entity.name = t->name;
+#endif
+
+ dprintk("type set to %s\n", t->name);
t->mode_mask = new_mode_mask;
@@ -451,13 +491,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
set_tv_freq(c, t->tv_freq);
}
- tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
- c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+ dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
attach_failed:
- tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+ dprintk("Tuner attach for type = %d failed.\n", t->type);
t->type = TUNER_ABSENT;
return;
@@ -469,7 +509,7 @@ attach_failed:
* @sd: subdev descriptor
* @tun_setup: type to be associated to a given tuner i2c address
*
- * This function applys the tuner config to tuner specified
+ * This function applies the tuner config to tuner specified
* by tun_setup structure.
* If tuner I2C address is UNSET, then it will only set the device
* if the tuner supports the mode specified in the call.
@@ -483,7 +523,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
struct tuner *t = to_tuner(sd);
struct i2c_client *c = v4l2_get_subdevdata(sd);
- tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+ dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
tun_setup->type,
tun_setup->addr,
tun_setup->mode_mask,
@@ -495,8 +535,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
set_type(c, tun_setup->type, tun_setup->mode_mask,
tun_setup->config, tun_setup->tuner_callback);
} else
- tuner_dbg("set addr discarded for type %i, mask %x. "
- "Asked to change tuner at addr 0x%02x, with mask %x\n",
+ dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
t->type, t->mode_mask,
tun_setup->addr, tun_setup->mode_mask);
@@ -526,7 +565,7 @@ static int tuner_s_config(struct v4l2_subdev *sd,
return 0;
}
- tuner_dbg("Tuner frontend module has no way to set config\n");
+ dprintk("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -556,7 +595,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
int mode_mask;
if (pos->i2c->adapter != adap ||
- strcmp(pos->i2c->driver->driver.name, "tuner"))
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
continue;
mode_mask = pos->mode_mask;
@@ -575,7 +614,6 @@ static void tuner_lookup(struct i2c_adapter *adap,
*tuner_probe - Probes the existing tuners on an I2C bus
*
* @client: i2c_client descriptor
- * @id: not used
*
* This routine probes for tuners at the expected I2C addresses. On most
* cases, if a device answers to a given I2C address, it assumes that the
@@ -586,12 +624,14 @@ static void tuner_lookup(struct i2c_adapter *adap,
* During client attach, set_type is called by adapter's attach_inform callback.
* set_type must then be completed by tuner_probe.
*/
-static int tuner_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tuner_probe(struct i2c_client *client)
{
struct tuner *t;
struct tuner *radio;
struct tuner *tv;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+#endif
t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
if (NULL == t)
@@ -601,20 +641,18 @@ static int tuner_probe(struct i2c_client *client,
t->name = "(tuner unset)";
t->type = UNSET;
t->audmode = V4L2_TUNER_MODE_STEREO;
- t->standby = 1;
+ t->standby = true;
t->radio_freq = 87.5 * 16000; /* Initial freq range */
t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */
if (show_i2c) {
unsigned char buffer[16];
- int i, rc;
+ int rc;
memset(buffer, 0, sizeof(buffer));
rc = i2c_master_recv(client, buffer, sizeof(buffer));
- tuner_info("I2C RECV = ");
- for (i = 0; i < rc; i++)
- printk(KERN_CONT "%02x ", buffer[i]);
- printk("\n");
+ if (rc >= 0)
+ pr_info("I2C RECV = %*ph\n", rc, buffer);
}
/* autodetection code based on the i2c addr */
@@ -642,7 +680,7 @@ static int tuner_probe(struct i2c_client *client,
since it can be tda9887*/
if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
t->i2c->addr) >= 0) {
- tuner_dbg("tda829x detected\n");
+ dprintk("tda829x detected\n");
} else {
/* Default is being tda9887 */
t->type = TUNER_TDA9887;
@@ -679,11 +717,45 @@ static int tuner_probe(struct i2c_client *client,
t->mode_mask = T_ANALOG_TV;
if (radio == NULL)
t->mode_mask |= T_RADIO;
- tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
+ dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
}
/* Should be just before return */
register_client:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ t->sd.entity.name = t->name;
+ /*
+ * Handle the special case where the tuner has actually
+ * two stages: the PLL to tune into a frequency and the
+ * IF-PLL demodulator (tda988x).
+ */
+ if (t->type == TUNER_TDA9887) {
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG;
+ ret = media_entity_pads_init(&t->sd.entity,
+ IF_VID_DEC_PAD_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
+ } else {
+ t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;
+ ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_TUNER;
+ }
+
+ if (ret < 0) {
+ pr_err("failed to initialize media entity!\n");
+ kfree(t);
+ return ret;
+ }
+#endif
/* Sets a default mode */
if (t->mode_mask & T_ANALOG_TV)
t->mode = V4L2_TUNER_ANALOG_TV;
@@ -692,7 +764,7 @@ register_client:
set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
list_add_tail(&t->list, &tuner_list);
- tuner_info("Tuner %d found with type(s)%s%s.\n",
+ pr_info("Tuner %d found with type(s)%s%s.\n",
t->type,
t->mode_mask & T_RADIO ? " Radio" : "",
t->mode_mask & T_ANALOG_TV ? " TV" : "");
@@ -705,7 +777,7 @@ register_client:
* @client: i2c_client descriptor
*/
-static int tuner_remove(struct i2c_client *client)
+static void tuner_remove(struct i2c_client *client)
{
struct tuner *t = to_tuner(i2c_get_clientdata(client));
@@ -715,7 +787,6 @@ static int tuner_remove(struct i2c_client *client)
list_del(&t->list);
kfree(t);
- return 0;
}
/*
@@ -729,6 +800,7 @@ static int tuner_remove(struct i2c_client *client)
/**
* check_mode - Verify if tuner supports the requested mode
* @t: a pointer to the module's internal struct_tuner
+ * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
*
* This function checks if the tuner is capable of tuning analog TV,
* digital TV or radio, depending on what the caller wants. If the
@@ -736,6 +808,7 @@ static int tuner_remove(struct i2c_client *client)
* returns 0.
* This function is needed for boards that have a separate tuner for
* radio (like devices with tea5767).
+ *
* NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
* select a TV frequency. So, t_mode = T_ANALOG_TV could actually
* be used to represent a Digital TV too.
@@ -769,15 +842,15 @@ static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
if (mode != t->mode) {
if (check_mode(t, mode) == -EINVAL) {
- tuner_dbg("Tuner doesn't support mode %d. "
- "Putting tuner to sleep\n", mode);
+ dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
+ mode);
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
return -EINVAL;
}
t->mode = mode;
- tuner_dbg("Changing to mode %d\n", mode);
+ dprintk("Changing to mode %d\n", mode);
}
return 0;
}
@@ -824,15 +897,15 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("Tuner has no way to set tv freq\n");
+ pr_warn("Tuner has no way to set tv freq\n");
return;
}
if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
- tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
freq / 16, freq % 16 * 100 / 16, tv_range[0],
tv_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -843,7 +916,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
freq = tv_range[1] * 16;
}
params.frequency = freq;
- tuner_dbg("tv freq set to %d.%02d\n",
+ dprintk("tv freq set to %d.%02d\n",
freq / 16, freq % 16 * 100 / 16);
t->tv_freq = freq;
t->standby = false;
@@ -893,7 +966,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_PAL_Nc;
return V4L2_STD_PAL_N;
default:
- tuner_warn("pal= argument not recognised\n");
+ pr_warn("pal= argument not recognised\n");
break;
}
}
@@ -919,7 +992,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_SECAM_LC;
return V4L2_STD_SECAM_L;
default:
- tuner_warn("secam= argument not recognised\n");
+ pr_warn("secam= argument not recognised\n");
break;
}
}
@@ -936,7 +1009,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
case 'K':
return V4L2_STD_NTSC_M_KR;
default:
- tuner_info("ntsc= argument not recognised\n");
+ pr_info("ntsc= argument not recognised\n");
break;
}
}
@@ -965,15 +1038,15 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("tuner has no way to set radio frequency\n");
+ pr_warn("tuner has no way to set radio frequency\n");
return;
}
if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
- tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
freq / 16000, freq % 16000 * 100 / 16000,
radio_range[0], radio_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -984,7 +1057,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
freq = radio_range[1] * 16000;
}
params.frequency = freq;
- tuner_dbg("radio freq set to %d.%02d\n",
+ dprintk("radio freq set to %d.%02d\n",
freq / 16000, freq % 16000 * 100 / 16000);
t->radio_freq = freq;
t->standby = false;
@@ -1035,26 +1108,26 @@ static void tuner_status(struct dvb_frontend *fe)
freq = t->tv_freq / 16;
freq_fraction = (t->tv_freq % 16) * 100 / 16;
}
- tuner_info("Tuner mode: %s%s\n", p,
+ pr_info("Tuner mode: %s%s\n", p,
t->standby ? " on standby mode" : "");
- tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
- tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+ pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
if (fe_tuner_ops->get_status) {
- u32 tuner_status;
+ u32 tuner_status = 0;
fe_tuner_ops->get_status(&t->fe, &tuner_status);
if (tuner_status & TUNER_STATUS_LOCKED)
- tuner_info("Tuner is locked.\n");
+ pr_info("Tuner is locked.\n");
if (tuner_status & TUNER_STATUS_STEREO)
- tuner_info("Stereo: yes\n");
+ pr_info("Stereo: yes\n");
}
if (analog_ops->has_signal) {
u16 signal;
if (!analog_ops->has_signal(fe, &signal))
- tuner_info("Signal strength: %hu\n", signal);
+ pr_info("Signal strength: %hu\n", signal);
}
}
@@ -1076,24 +1149,15 @@ static int tuner_s_radio(struct v4l2_subdev *sd)
*/
/**
- * tuner_s_power - controls the power state of the tuner
+ * tuner_standby - places the tuner in standby mode
* @sd: pointer to struct v4l2_subdev
- * @on: a zero value puts the tuner to sleep, non-zero wakes it up
*/
-static int tuner_s_power(struct v4l2_subdev *sd, int on)
+static int tuner_standby(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- if (on) {
- if (t->standby && set_mode(t, t->mode) == 0) {
- tuner_dbg("Waking up tuner\n");
- set_freq(t, 0);
- }
- return 0;
- }
-
- tuner_dbg("Putting tuner to sleep\n");
+ dprintk("Putting tuner to sleep\n");
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
@@ -1109,7 +1173,7 @@ static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
t->std = tuner_fixup_std(t, std);
if (t->std != std)
- tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ dprintk("Fixup standard %llx to %llx\n", std, t->std);
set_freq(t, 0);
return 0;
}
@@ -1191,7 +1255,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (vt->type == t->mode) {
vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
if (fe_tuner_ops->get_status) {
- u32 tuner_status;
+ u32 tuner_status = 0;
fe_tuner_ops->get_status(&t->fe, &tuner_status);
vt->rxsubchans =
@@ -1258,9 +1322,11 @@ static int tuner_suspend(struct device *dev)
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- tuner_dbg("suspend\n");
+ dprintk("suspend\n");
- if (!t->standby && analog_ops->standby)
+ if (t->fe.ops.tuner_ops.suspend)
+ t->fe.ops.tuner_ops.suspend(&t->fe);
+ else if (!t->standby && analog_ops->standby)
analog_ops->standby(&t->fe);
return 0;
@@ -1271,9 +1337,11 @@ static int tuner_resume(struct device *dev)
struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
- tuner_dbg("resume\n");
+ dprintk("resume\n");
- if (!t->standby)
+ if (t->fe.ops.tuner_ops.resume)
+ t->fe.ops.tuner_ops.resume(&t->fe);
+ else if (!t->standby)
if (set_mode(t, t->mode) == 0)
set_freq(t, 0);
@@ -1301,11 +1369,10 @@ static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
static const struct v4l2_subdev_core_ops tuner_core_ops = {
.log_status = tuner_log_status,
- .s_std = tuner_s_std,
- .s_power = tuner_s_power,
};
static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
+ .standby = tuner_standby,
.s_radio = tuner_s_radio,
.g_tuner = tuner_g_tuner,
.s_tuner = tuner_s_tuner,
@@ -1315,9 +1382,14 @@ static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
.s_config = tuner_s_config,
};
+static const struct v4l2_subdev_video_ops tuner_video_ops = {
+ .s_std = tuner_s_std,
+};
+
static const struct v4l2_subdev_ops tuner_ops = {
.core = &tuner_core_ops,
.tuner = &tuner_tuner_ops,
+ .video = &tuner_video_ops,
};
/*
@@ -1336,7 +1408,6 @@ MODULE_DEVICE_TABLE(i2c, tuner_id);
static struct i2c_driver tuner_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tuner",
.pm = &tuner_pm_ops,
},
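With the hunks above, the tuner sub-device no longer implements the core .s_power operation: standby becomes a dedicated tuner op and .s_std moves from the core ops to the new video ops. A minimal sketch (not taken from this patch; foo_* names are placeholders) of how a bridge driver follows that change:

/* Sketch only: adapting a hypothetical bridge driver to the new ops. */
#include <linux/videodev2.h>
#include <media/v4l2-device.h>

static void foo_bridge_tuner_sleep(struct v4l2_device *v4l2_dev)
{
	/* Previously: v4l2_device_call_all(v4l2_dev, 0, core, s_power, 0); */
	v4l2_device_call_all(v4l2_dev, 0, tuner, standby);
}

static void foo_bridge_set_std(struct v4l2_device *v4l2_dev, v4l2_std_id std)
{
	/* s_std is now a video op rather than a core op. */
	v4l2_device_call_all(v4l2_dev, 0, video, s_std, std);
}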
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index aae241730caa..ee884a8221fb 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -1,73 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 asynchronous subdevice registration API
*
* Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
+#include "v4l2-subdev-priv.h"
+
+static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->bound)
+ return 0;
+
+ return n->ops->bound(n, subdev, asc);
+}
+
+static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->unbind)
+ return;
+
+ n->ops->unbind(n, subdev, asc);
+}
+
+static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
+{
+ if (!n->ops || !n->ops->complete)
+ return 0;
+
+ return n->ops->complete(n);
+}
+
+static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->destroy)
+ return;
+
+ n->ops->destroy(asc);
+}
+
+static bool match_i2c(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
{
#if IS_ENABLED(CONFIG_I2C)
- struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_client *client = i2c_verify_client(sd->dev);
+
return client &&
- asd->bus_type == V4L2_ASYNC_BUS_I2C &&
- asd->match.i2c.adapter_id == client->adapter->nr &&
- asd->match.i2c.address == client->addr;
+ match->i2c.adapter_id == client->adapter->nr &&
+ match->i2c.address == client->addr;
#else
return false;
#endif
}
-static bool match_platform(struct device *dev, struct v4l2_async_subdev *asd)
+static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->sd)
+ return notifier->sd->dev;
+
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->dev;
+
+ return NULL;
+}
+
+static bool
+match_fwnode_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
+ struct v4l2_async_match_desc *match)
+{
+ struct fwnode_handle *asd_dev_fwnode;
+ bool ret;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: fwnode match: need %pfw, trying %pfw\n",
+ sd_fwnode, match->fwnode);
+
+ if (sd_fwnode == match->fwnode) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match found\n");
+ return true;
+ }
+
+ if (!fwnode_graph_is_endpoint(match->fwnode)) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match not found\n");
+ return false;
+ }
+
+ asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);
+
+ ret = sd_fwnode == asd_dev_fwnode;
+
+ fwnode_handle_put(asd_dev_fwnode);
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: device--endpoint match %sfound\n",
+ ret ? "" : "not ");
+
+ return ret;
+}
+
+static bool match_fwnode(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
{
- return asd->bus_type == V4L2_ASYNC_BUS_PLATFORM &&
- !strcmp(asd->match.platform.name, dev_name(dev));
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
+ dev_fwnode(notifier_dev(notifier)), sd->fwnode);
+
+ if (!list_empty(&sd->async_subdev_endpoint_list)) {
+ struct v4l2_async_subdev_endpoint *ase;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint fwnode list available, looking for %pfw\n",
+ match->fwnode);
+
+ list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ bool matched = ase->endpoint == match->fwnode;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
+ matched ? "" : "not ", ase->endpoint);
+
+ if (matched)
+ return true;
+ }
+
+ dev_dbg(sd->dev, "async: no endpoint matched\n");
+
+ return false;
+ }
+
+ if (match_fwnode_one(notifier, sd, sd->fwnode, match))
+ return true;
+
+ /* Also check the secondary fwnode. */
+ if (IS_ERR_OR_NULL(sd->fwnode->secondary))
+ return false;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying secondary fwnode match\n");
+
+ return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
}
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev_list *asdl)
+static struct v4l2_async_connection *
+v4l2_async_find_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
- struct v4l2_async_subdev *asd;
- bool (*match)(struct device *,
- struct v4l2_async_subdev *);
+ bool (*match)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match);
+ struct v4l2_async_connection *asc;
- list_for_each_entry(asd, &notifier->waiting, list) {
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
/* bus_type has been verified valid before */
- switch (asd->bus_type) {
- case V4L2_ASYNC_BUS_CUSTOM:
- match = asd->match.custom.match;
- if (!match)
- /* Match always */
- return asd;
- break;
- case V4L2_ASYNC_BUS_PLATFORM:
- match = match_platform;
- break;
- case V4L2_ASYNC_BUS_I2C:
+ switch (asc->match.type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
match = match_i2c;
break;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ match = match_fwnode;
+ break;
default:
/* Cannot happen, unless someone breaks us */
WARN_ON(true);
@@ -75,210 +200,777 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
}
/* match cannot be NULL here */
- if (match(sd->dev, asd))
- return asd;
+ if (match(notifier, sd, &asc->match))
+ return asc;
}
return NULL;
}
-static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev_list *asdl,
- struct v4l2_async_subdev *asd)
+/* Compare two async match descriptors for equivalence */
+static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
+ struct v4l2_async_match_desc *match2)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
- int ret;
+ if (match1->type != match2->type)
+ return false;
+
+ switch (match1->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ return match1->i2c.adapter_id == match2->i2c.adapter_id &&
+ match1->i2c.address == match2->i2c.address;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ return match1->fwnode == match2->fwnode;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *
+v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *n;
+
+ list_for_each_entry(n, &notifier_list, notifier_entry)
+ if (n->sd == sd)
+ return n;
+
+ return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found. */
+static struct v4l2_device *
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+{
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc;
+
+ if (!list_empty(&notifier->waiting_list))
+ return false;
- /* Remove from the waiting list */
- list_del(&asd->list);
- asdl->asd = asd;
- asdl->notifier = notifier;
+ list_for_each_entry(asc, &notifier->done_list, asc_entry) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(asc->sd);
- if (notifier->bound) {
- ret = notifier->bound(notifier, sd, asd);
+ if (subdev_notifier &&
+ !v4l2_async_nf_can_complete(subdev_notifier))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_notifier *__notifier = notifier;
+
+ /* Quick check whether there are still more sub-devices here. */
+ if (!list_empty(&notifier->waiting_list))
+ return 0;
+
+ if (notifier->sd)
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying to complete\n");
+
+ /* Check the entire notifier tree; find the root notifier first. */
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ /* This is root if it has v4l2_dev. */
+ if (!notifier->v4l2_dev) {
+ dev_dbg(notifier_dev(__notifier),
+ "v4l2-async: V4L2 device not available\n");
+ return 0;
+ }
+
+ /* Is everything ready? */
+ if (!v4l2_async_nf_can_complete(notifier))
+ return 0;
+
+ dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");
+
+ return v4l2_async_nf_call_complete(notifier);
+}
+
+static int
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
+
+static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *sd)
+{
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ struct media_link *link;
+
+ if (sd->entity.function != MEDIA_ENT_F_LENS &&
+ sd->entity.function != MEDIA_ENT_F_FLASH)
+ return 0;
+
+ if (!n->sd) {
+ dev_warn(notifier_dev(n),
+ "not a sub-device notifier, not creating an ancillary link for %s!\n",
+ dev_name(sd->dev));
+ return 0;
+ }
+
+ link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
+
+ return IS_ERR(link) ? PTR_ERR(link) : 0;
+#else
+ return 0;
+#endif
+}
+
+static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ bool registered = false;
+ int ret;
+
+ if (list_empty(&sd->asc_list)) {
+ ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner);
if (ret < 0)
return ret;
+ registered = true;
}
- /* Move from the global subdevice list to notifier's done */
- list_move(&asdl->list, &notifier->done);
- ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ ret = v4l2_async_nf_call_bound(notifier, sd, asc);
if (ret < 0) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
- return ret;
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed binding %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_unregister_subdev;
}
- if (list_empty(&notifier->waiting) && notifier->complete)
- return notifier->complete(notifier);
+ if (registered) {
+ /*
+ * Depending on the function of the entities involved, we may
+ * want to create links between them (for example between a
+ * sensor and its lens or between a sensor's source pad and the
+ * connected device's sink pad).
+ */
+ ret = v4l2_async_create_ancillary_links(notifier, sd);
+ if (ret) {
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed creating links for %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_call_unbind;
+ }
+ }
+
+ list_add(&asc->asc_subdev_entry, &sd->asc_list);
+ asc->sd = sd;
+
+ /* Move from the waiting list to notifier's done */
+ list_move(&asc->asc_entry, &notifier->done_list);
+
+ dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
+ dev_name(sd->dev), ret);
+
+ /*
+ * See if the sub-device has a notifier. If not, return here.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (!subdev_notifier || subdev_notifier->parent)
+ return 0;
+
+ /*
+ * Proceed with checking for the sub-device notifier's async
+ * sub-devices, and return the result. The error will be handled by the
+ * caller.
+ */
+ subdev_notifier->parent = notifier;
+
+ return v4l2_async_nf_try_all_subdevs(subdev_notifier);
+
+err_call_unbind:
+ v4l2_async_nf_call_unbind(notifier, sd, asc);
+ list_del(&asc->asc_subdev_entry);
+
+err_unregister_subdev:
+ if (registered)
+ v4l2_device_unregister_subdev(sd);
+
+ return ret;
+}
+
+/* Test all async sub-devices in a notifier for a match. */
+static int
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_nf_find_v4l2_dev(notifier);
+ struct v4l2_subdev *sd;
+
+ if (!v4l2_dev)
+ return 0;
+
+ dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");
+
+again:
+ list_for_each_entry(sd, &subdev_list, async_list) {
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ asc = v4l2_async_find_match(notifier, sd);
+ if (!asc)
+ continue;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: match found, subdev %s\n", sd->name);
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_async_match_notify() may lead to registering a
+ * new notifier and thus changing the async subdevs
+ * list. In order to proceed safely from here, restart
+ * parsing the list from the beginning.
+ */
+ goto again;
+ }
return 0;
}
-static void v4l2_async_cleanup(struct v4l2_async_subdev_list *asdl)
+static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
+ list_move_tail(&asc->asc_entry, &notifier->waiting_list);
+ if (list_is_singular(&asc->asc_subdev_entry)) {
+ v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
+ v4l2_device_unregister_subdev(asc->sd);
+ asc->sd = NULL;
+ }
+ list_del(&asc->asc_subdev_entry);
+}
+
+/* Unbind all sub-devices in the notifier tree. */
+static void
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc, *asc_tmp;
+
+ list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
+ asc_entry) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(asc->sd);
+
+ if (subdev_notifier)
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- v4l2_device_unregister_subdev(sd);
- /* Subdevice driver will reprobe and put asdl back onto the list */
- list_del_init(&asdl->list);
- asdl->asd = NULL;
- sd->dev = NULL;
+ v4l2_async_unbind_subdev_one(notifier, asc);
+ }
+
+ notifier->parent = NULL;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+/* See if an async sub-device can be found in a notifier's lists. */
+static bool
+v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
{
- struct v4l2_async_subdev_list *asdl, *tmp;
- struct v4l2_async_subdev *asd;
- int i;
+ struct v4l2_async_connection *asc;
- if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
- return -EINVAL;
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
- notifier->v4l2_dev = v4l2_dev;
- INIT_LIST_HEAD(&notifier->waiting);
- INIT_LIST_HEAD(&notifier->done);
+ list_for_each_entry(asc, &notifier->done_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
- for (i = 0; i < notifier->num_subdevs; i++) {
- asd = notifier->subdev[i];
+ return false;
+}
- switch (asd->bus_type) {
- case V4L2_ASYNC_BUS_CUSTOM:
- case V4L2_ASYNC_BUS_PLATFORM:
- case V4L2_ASYNC_BUS_I2C:
- break;
- default:
- dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
- "Invalid bus-type %u on %p\n",
- asd->bus_type, asd);
- return -EINVAL;
+/*
+ * Find out whether an async sub-device was set up already or whether it exists
+ * in a given notifier.
+ */
+static bool
+v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
+{
+ struct list_head *heads[] = {
+ &notifier->waiting_list,
+ &notifier->done_list,
+ };
+ unsigned int i;
+
+ lockdep_assert_held(&list_lock);
+
+ /* Check that an asd is not being added more than once. */
+ for (i = 0; i < ARRAY_SIZE(heads); i++) {
+ struct v4l2_async_connection *asc;
+
+ list_for_each_entry(asc, heads[i], asc_entry) {
+ if (&asc->match == match)
+ continue;
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
}
- list_add_tail(&asd->list, &notifier->waiting);
}
- mutex_lock(&list_lock);
+ /* Check that an asc does not exist in other notifiers. */
+ list_for_each_entry(notifier, &notifier_list, notifier_entry)
+ if (v4l2_async_nf_has_async_match_entry(notifier, match))
+ return true;
- /* Keep also completed notifiers on the list */
- list_add(&notifier->list, &notifier_list);
+ return false;
+}
- list_for_each_entry_safe(asdl, tmp, &subdev_list, list) {
- int ret;
+static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
+{
+ struct device *dev = notifier_dev(notifier);
+
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ if (v4l2_async_nf_has_async_match(notifier, match)) {
+ dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
+ return -EEXIST;
+ }
+ break;
+ default:
+ dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
+ match->type, match);
+ return -EINVAL;
+ }
- asd = v4l2_async_belongs(notifier, asdl);
- if (!asd)
- continue;
+ return 0;
+}
- ret = v4l2_async_test_notify(notifier, asdl, asd);
- if (ret < 0) {
- mutex_unlock(&list_lock);
- return ret;
- }
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev)
+{
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
+ notifier->v4l2_dev = v4l2_dev;
+}
+EXPORT_SYMBOL(v4l2_async_nf_init);
+
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
+{
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
+ notifier->sd = sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
+
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
+ ret = v4l2_async_nf_match_valid(notifier, &asc->match);
+ if (ret)
+ goto err_unlock;
}
+ ret = v4l2_async_nf_try_all_subdevs(notifier);
+ if (ret < 0)
+ goto err_unbind;
+
+ ret = v4l2_async_nf_try_complete(notifier);
+ if (ret < 0)
+ goto err_unbind;
+
+ /* Keep also completed notifiers on the list */
+ list_add(&notifier->notifier_entry, &notifier_list);
+
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * On failure, unbind all sub-devices registered through this notifier.
+ */
+ v4l2_async_nf_unbind_all_subdevs(notifier);
+
+err_unlock:
+ mutex_unlock(&list_lock);
+
+ return ret;
}
-EXPORT_SYMBOL(v4l2_async_notifier_register);
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
- struct v4l2_async_subdev_list *asdl, *tmp;
- unsigned int notif_n_subdev = notifier->num_subdevs;
- unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device *dev[n_subdev];
- int i = 0;
+ if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
+ return -EINVAL;
+
+ return __v4l2_async_nf_register(notifier);
+}
+EXPORT_SYMBOL(v4l2_async_nf_register);
+
+static void
+__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+{
+ if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ return;
+
+ v4l2_async_nf_unbind_all_subdevs(notifier);
+ list_del_init(&notifier->notifier_entry);
+}
+
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+{
mutex_lock(&list_lock);
- list_del(&notifier->list);
+ __v4l2_async_nf_unregister(notifier);
- list_for_each_entry_safe(asdl, tmp, &notifier->done, list) {
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_nf_unregister);
- dev[i] = get_device(sd->dev);
+static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc, *tmp;
+
+ if (!notifier || !notifier->waiting_list.next)
+ return;
- v4l2_async_cleanup(asdl);
+ WARN_ON(!list_empty(&notifier->done_list));
- /* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(dev[i++]);
+ list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) {
+ list_del(&asc->asc_entry);
+ v4l2_async_nf_call_destroy(notifier, asc);
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asdl.asd);
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ fwnode_handle_put(asc->match.fwnode);
+
+ kfree(asc);
}
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
+}
+
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
+
+ __v4l2_async_nf_cleanup(notifier);
+
mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
- while (i--) {
- struct device *d = dev[i];
+static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
+{
+ mutex_lock(&list_lock);
- if (d && device_attach(d) < 0) {
- const char *name = "(none)";
- int lock = device_trylock(d);
+ list_add_tail(&asc->asc_entry, &notifier->waiting_list);
- if (lock && d->driver)
- name = d->driver->name;
- dev_err(d, "Failed to re-probe to %s\n", name);
- if (lock)
- device_unlock(d);
- }
- put_device(d);
- }
+ mutex_unlock(&list_lock);
+}
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
+ return ERR_PTR(-ENOMEM);
+
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
+ asc->match.fwnode = fwnode_handle_get(fwnode);
+
+ __v4l2_async_nf_add_connection(notifier, asc);
+
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *remote;
+
+ remote = fwnode_graph_get_remote_endpoint(endpoint);
+ if (!remote)
+ return ERR_PTR(-ENOTCONN);
+
+ asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
/*
- * Don't care about the waiting list, it is initialised and populated
- * upon notifier registration.
+ * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
+ * so drop the one we got in fwnode_graph_get_remote_endpoint().
*/
+ fwnode_handle_put(remote);
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
+ unsigned short address, unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
+ return ERR_PTR(-ENOMEM);
+
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
+ asc->match.i2c.adapter_id = adapter_id;
+ asc->match.i2c.address = address;
+
+ __v4l2_async_nf_add_connection(notifier, asc);
+
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
+
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode)
+{
+ struct v4l2_async_subdev_endpoint *ase;
+
+ ase = kmalloc(sizeof(*ase), GFP_KERNEL);
+ if (!ase)
+ return -ENOMEM;
+
+ ase->endpoint = fwnode;
+ list_add(&ase->async_subdev_endpoint_entry,
+ &sd->async_subdev_endpoint_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);
+
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd)
+{
+ if (!list_is_singular(&sd->asc_list))
+ return NULL;
+
+ return list_first_entry(&sd->asc_list,
+ struct v4l2_async_connection, asc_subdev_entry);
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);
-int v4l2_async_register_subdev(struct v4l2_subdev *sd)
+int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module)
{
- struct v4l2_async_subdev_list *asdl = &sd->asdl;
+ struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ INIT_LIST_HEAD(&sd->asc_list);
+
+ /*
+ * No reference taken. The reference is held by the device (struct
+ * v4l2_subdev.dev), and async sub-device does not exist independently
+ * of the device at any point of time.
+ *
+ * The async sub-device shall always be registered for its device node,
+ * not the endpoint node.
+ */
+ if (!sd->fwnode && sd->dev) {
+ sd->fwnode = dev_fwnode(sd->dev);
+ } else if (fwnode_graph_is_endpoint(sd->fwnode)) {
+ dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
+ return -EINVAL;
+ }
+
+ sd->owner = module;
mutex_lock(&list_lock);
- INIT_LIST_HEAD(&asdl->list);
+ list_for_each_entry(notifier, &notifier_list, notifier_entry) {
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_nf_find_v4l2_dev(notifier);
- list_for_each_entry(notifier, &notifier_list, list) {
- struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, asdl);
- if (asd) {
- int ret = v4l2_async_test_notify(notifier, asdl, asd);
- mutex_unlock(&list_lock);
- return ret;
+ if (!v4l2_dev)
+ continue;
+
+ while ((asc = v4l2_async_find_match(notifier, sd))) {
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
+ asc);
+ if (ret)
+ goto err_unbind;
+
+ ret = v4l2_async_nf_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
}
}
/* None matched, wait for hot-plugging */
- list_add(&asdl->list, &subdev_list);
+ list_add(&sd->async_list, &subdev_list);
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * Complete failed. Unbind the sub-devices bound through registering
+ * this async sub-device.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (subdev_notifier)
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+
+ if (asc)
+ v4l2_async_unbind_subdev_one(notifier, asc);
+
+ mutex_unlock(&list_lock);
+
+ sd->owner = NULL;
+
+ return ret;
}
-EXPORT_SYMBOL(v4l2_async_register_subdev);
+EXPORT_SYMBOL(__v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_subdev_list *asdl = &sd->asdl;
- struct v4l2_async_notifier *notifier = asdl->notifier;
+ struct v4l2_async_connection *asc, *asc_tmp;
- if (!asdl->asd) {
- if (!list_empty(&asdl->list))
- v4l2_async_cleanup(asdl);
+ if (!sd->async_list.next)
return;
- }
+
+ v4l2_subdev_put_privacy_led(sd);
mutex_lock(&list_lock);
- list_add(&asdl->asd->list, &notifier->waiting);
+ __v4l2_async_nf_unregister(sd->subdev_notifier);
+ __v4l2_async_nf_cleanup(sd->subdev_notifier);
+ kfree(sd->subdev_notifier);
+ sd->subdev_notifier = NULL;
- v4l2_async_cleanup(asdl);
+ if (sd->asc_list.next) {
+ list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
+ asc_subdev_entry) {
+ v4l2_async_unbind_subdev_one(asc->notifier, asc);
+ }
+ }
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asdl.asd);
+ list_del(&sd->async_list);
+ sd->async_list.next = NULL;
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
+
+static void print_waiting_match(struct seq_file *s,
+ struct v4l2_async_match_desc *match)
+{
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
+ match->i2c.address);
+ break;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
+ struct fwnode_handle *devnode, *fwnode = match->fwnode;
+
+ devnode = fwnode_graph_is_endpoint(fwnode) ?
+ fwnode_graph_get_port_parent(fwnode) :
+ fwnode_handle_get(fwnode);
+
+ seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
+ devnode->dev ? dev_name(devnode->dev) : "nil",
+ fwnode);
+
+ fwnode_handle_put(devnode);
+ break;
+ }
+ }
+}
+
+static const char *
+v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->name;
+ else if (notifier->sd)
+ return notifier->sd->name;
+ else
+ return "nil";
+}
+
+static int pending_subdevs_show(struct seq_file *s, void *data)
+{
+ struct v4l2_async_notifier *notif;
+ struct v4l2_async_connection *asc;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(notif, &notifier_list, notifier_entry) {
+ seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
+ list_for_each_entry(asc, &notif->waiting_list, asc_entry)
+ print_waiting_match(s, &asc->match);
+ }
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
+
+static struct dentry *v4l2_async_debugfs_dir;
+
+static int __init v4l2_async_init(void)
+{
+ v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
+ debugfs_create_file("pending_async_subdevices", 0444,
+ v4l2_async_debugfs_dir, NULL,
+ &pending_subdevs_fops);
+
+ return 0;
+}
+
+static void __exit v4l2_async_exit(void)
+{
+ debugfs_remove_recursive(v4l2_async_debugfs_dir);
+}
+
+subsys_initcall(v4l2_async_init);
+module_exit(v4l2_async_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("V4L2 asynchronous subdevice registration API");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
new file mode 100644
index 000000000000..e9ecf4785946
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-cci.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/unaligned.h>
+
+#include <media/v4l2-cci.h>
+
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+{
+ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ /*
+	 * TODO: Fix smatch. Set *val to 0 here to avoid a false-positive
+	 * smatch warning in callers that read *val without initialising it
+	 * on their side. *val is assigned a valid value whenever this
+	 * function returns 0, but smatch currently can't figure that out.
+ */
+ *val = 0;
+
+ if (err && *err)
+ return *err;
+
+ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+ dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
+ reg, ret);
+ goto out;
+ }
+
+ switch (len) {
+ case 1:
+ *val = buf[0];
+ break;
+ case 2:
+ if (little_endian)
+ *val = get_unaligned_le16(buf);
+ else
+ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+ if (little_endian)
+ *val = get_unaligned_le24(buf);
+ else
+ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+ if (little_endian)
+ *val = get_unaligned_le32(buf);
+ else
+ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+ if (little_endian)
+ *val = get_unaligned_le64(buf);
+ else
+ *val = get_unaligned_be64(buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_read);
+
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+{
+ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+ switch (len) {
+ case 1:
+ buf[0] = val;
+ break;
+ case 2:
+ if (little_endian)
+ put_unaligned_le16(val, buf);
+ else
+ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+ if (little_endian)
+ put_unaligned_le24(val, buf);
+ else
+ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+ if (little_endian)
+ put_unaligned_le32(val, buf);
+ else
+ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+ if (little_endian)
+ put_unaligned_le64(val, buf);
+ else
+ put_unaligned_be64(val, buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = regmap_bulk_write(map, reg, buf, len);
+ if (ret)
+ dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
+ reg, ret);
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_write);
+
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err)
+{
+ u64 readval;
+ int ret;
+
+ ret = cci_read(map, reg, &readval, err);
+ if (ret)
+ return ret;
+
+ val = (readval & ~mask) | (val & mask);
+
+ return cci_write(map, reg, val, err);
+}
+EXPORT_SYMBOL_GPL(cci_update_bits);
+
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = cci_write(map, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cci_multi_reg_write);
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits)
+{
+ struct regmap_config config = {
+ .reg_bits = reg_addr_bits,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .disable_locking = true,
+ };
+
+ return devm_regmap_init_i2c(client, &config);
+}
+EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support");
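A usage sketch for the new helpers (not part of the patch): the int *err argument lets a sequence of accesses share one error slot, so only the first failure needs to be checked. The MYSENSOR_* register definitions are invented for illustration; CCI_REG8()/CCI_REG16() are the width-encoding macros from media/v4l2-cci.h that CCI_REG_WIDTH_BYTES() and CCI_REG_ADDR() above decode:

#include <linux/i2c.h>
#include <linux/regmap.h>
#include <media/v4l2-cci.h>

#define MYSENSOR_REG_CHIP_ID	CCI_REG16(0x0000)
#define MYSENSOR_REG_MODE	CCI_REG8(0x0100)

static int mysensor_hw_init(struct i2c_client *client)
{
	struct regmap *cci;
	u64 chip_id;
	int ret = 0;

	/* 16-bit register addresses, 8-bit values, big-endian on the wire. */
	cci = devm_cci_regmap_init_i2c(client, 16);
	if (IS_ERR(cci))
		return PTR_ERR(cci);

	/* Each call becomes a no-op once 'ret' already holds an error. */
	cci_read(cci, MYSENSOR_REG_CHIP_ID, &chip_id, &ret);
	cci_write(cci, MYSENSOR_REG_MODE, 0x01, &ret);
	if (ret)
		dev_err(&client->dev, "CCI access failed: %d\n", ret);

	return ret;
}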
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
deleted file mode 100644
index b67de8642b5a..000000000000
--- a/drivers/media/v4l2-core/v4l2-clk.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * V4L2 clock service
- *
- * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
-
-static DEFINE_MUTEX(clk_lock);
-static LIST_HEAD(clk_list);
-
-static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
-{
- struct v4l2_clk *clk;
-
- list_for_each_entry(clk, &clk_list, list) {
- if (strcmp(dev_id, clk->dev_id))
- continue;
-
- if (!id || !clk->id || !strcmp(clk->id, id))
- return clk;
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
-{
- struct v4l2_clk *clk;
-
- mutex_lock(&clk_lock);
- clk = v4l2_clk_find(dev_name(dev), id);
-
- if (!IS_ERR(clk))
- atomic_inc(&clk->use_count);
- mutex_unlock(&clk_lock);
-
- return clk;
-}
-EXPORT_SYMBOL(v4l2_clk_get);
-
-void v4l2_clk_put(struct v4l2_clk *clk)
-{
- struct v4l2_clk *tmp;
-
- if (IS_ERR(clk))
- return;
-
- mutex_lock(&clk_lock);
-
- list_for_each_entry(tmp, &clk_list, list)
- if (tmp == clk)
- atomic_dec(&clk->use_count);
-
- mutex_unlock(&clk_lock);
-}
-EXPORT_SYMBOL(v4l2_clk_put);
-
-static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
-{
- struct v4l2_clk *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&clk_lock);
-
- list_for_each_entry(tmp, &clk_list, list)
- if (tmp == clk) {
- ret = !try_module_get(clk->ops->owner);
- if (ret)
- ret = -EFAULT;
- break;
- }
-
- mutex_unlock(&clk_lock);
-
- return ret;
-}
-
-static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
-{
- module_put(clk->ops->owner);
-}
-
-int v4l2_clk_enable(struct v4l2_clk *clk)
-{
- int ret = v4l2_clk_lock_driver(clk);
-
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
-
- if (++clk->enable == 1 && clk->ops->enable) {
- ret = clk->ops->enable(clk);
- if (ret < 0)
- clk->enable--;
- }
-
- mutex_unlock(&clk->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_enable);
-
-/*
- * You might Oops if you try to disabled a disabled clock, because then the
- * driver isn't locked and could have been unloaded by now, so, don't do that
- */
-void v4l2_clk_disable(struct v4l2_clk *clk)
-{
- int enable;
-
- mutex_lock(&clk->lock);
-
- enable = --clk->enable;
- if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
- clk->dev_id, clk->id))
- clk->enable++;
- else if (!enable && clk->ops->disable)
- clk->ops->disable(clk);
-
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-}
-EXPORT_SYMBOL(v4l2_clk_disable);
-
-unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
-{
- int ret = v4l2_clk_lock_driver(clk);
-
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
- if (!clk->ops->get_rate)
- ret = -ENOSYS;
- else
- ret = clk->ops->get_rate(clk);
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_get_rate);
-
-int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
-{
- int ret = v4l2_clk_lock_driver(clk);
-
- if (ret < 0)
- return ret;
-
- mutex_lock(&clk->lock);
- if (!clk->ops->set_rate)
- ret = -ENOSYS;
- else
- ret = clk->ops->set_rate(clk, rate);
- mutex_unlock(&clk->lock);
-
- v4l2_clk_unlock_driver(clk);
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_clk_set_rate);
-
-struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
- const char *dev_id,
- const char *id, void *priv)
-{
- struct v4l2_clk *clk;
- int ret;
-
- if (!ops || !dev_id)
- return ERR_PTR(-EINVAL);
-
- clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
- if (!clk)
- return ERR_PTR(-ENOMEM);
-
- clk->id = kstrdup(id, GFP_KERNEL);
- clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
- if ((id && !clk->id) || !clk->dev_id) {
- ret = -ENOMEM;
- goto ealloc;
- }
- clk->ops = ops;
- clk->priv = priv;
- atomic_set(&clk->use_count, 0);
- mutex_init(&clk->lock);
-
- mutex_lock(&clk_lock);
- if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
- mutex_unlock(&clk_lock);
- ret = -EEXIST;
- goto eexist;
- }
- list_add_tail(&clk->list, &clk_list);
- mutex_unlock(&clk_lock);
-
- return clk;
-
-eexist:
-ealloc:
- kfree(clk->id);
- kfree(clk->dev_id);
- kfree(clk);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(v4l2_clk_register);
-
-void v4l2_clk_unregister(struct v4l2_clk *clk)
-{
- if (WARN(atomic_read(&clk->use_count),
- "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
- __func__, clk->dev_id, clk->id))
- return;
-
- mutex_lock(&clk_lock);
- list_del(&clk->list);
- mutex_unlock(&clk_lock);
-
- kfree(clk->id);
- kfree(clk->dev_id);
- kfree(clk);
-}
-EXPORT_SYMBOL(v4l2_clk_unregister);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index a95e5e23403f..554c591e1113 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video for Linux Two
*
@@ -7,14 +8,8 @@
* This file replaces the videodev.c file that comes with the
* regular kernel distribution.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Author: Bill Dirks <bill@thedirks.org>
* based on code by Alan Cox, <alan@cymru.net>
- *
*/
/*
@@ -23,11 +18,6 @@
* A generic video device interface for the LINUX operating system
* using a set of device structures/vectors for low level operations.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* Fixes:
@@ -44,18 +34,16 @@
* Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/i2c.h>
-#if defined(CONFIG_SPI)
-#include <linux/spi/spi.h>
-#endif
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
@@ -64,10 +52,6 @@
#include <linux/videodev2.h>
-MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr");
-MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers");
-MODULE_LICENSE("GPL");
-
/*
*
* V 4 L 2 D R I V E R H E L P E R A P I
@@ -80,40 +64,14 @@ MODULE_LICENSE("GPL");
/* Helper functions for control handling */
-/* Check for correctness of the ctrl's value based on the data from
- struct v4l2_queryctrl and the available menu items. Note that
- menu_items may be NULL, in that case it is ignored. */
-int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items)
-{
- if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
- return -EINVAL;
- if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
- return -EBUSY;
- if (qctrl->type == V4L2_CTRL_TYPE_STRING)
- return 0;
- if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
- qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- return 0;
- if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
- return -ERANGE;
- if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
- if (menu_items[ctrl->value] == NULL ||
- menu_items[ctrl->value][0] == '\0')
- return -EINVAL;
- }
- if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
- (ctrl->value & ~qctrl->maximum))
- return -ERANGE;
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_check);
-
/* Fill in a struct v4l2_queryctrl */
-int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def)
+int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
const char *name;
+ s64 min = _min;
+ s64 max = _max;
+ u64 step = _step;
+ s64 def = _def;
v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
&min, &max, &step, &def, &qctrl->flags);
@@ -126,301 +84,11 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
qctrl->step = step;
qctrl->default_value = def;
qctrl->reserved[0] = qctrl->reserved[1] = 0;
- strlcpy(qctrl->name, name, sizeof(qctrl->name));
+ strscpy(qctrl->name, name, sizeof(qctrl->name));
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
-/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
- the menu. The qctrl pointer may be NULL, in which case it is ignored.
- If menu_items is NULL, then the menu items are retrieved using
- v4l2_ctrl_get_menu. */
-int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items)
-{
- int i;
-
- qmenu->reserved = 0;
- if (menu_items == NULL)
- menu_items = v4l2_ctrl_get_menu(qmenu->id);
- if (menu_items == NULL ||
- (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
- return -EINVAL;
- for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
- if (menu_items[i] == NULL || menu_items[i][0] == '\0')
- return -EINVAL;
- strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu);
-
-/* Fill in a struct v4l2_querymenu based on the specified array of valid
- menu items (terminated by V4L2_CTRL_MENU_IDS_END).
- Use this if there are 'holes' in the list of valid menu items. */
-int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
-{
- const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);
-
- qmenu->reserved = 0;
- if (menu_items == NULL || ids == NULL)
- return -EINVAL;
- while (*ids != V4L2_CTRL_MENU_IDS_END) {
- if (*ids++ == qmenu->index) {
- strlcpy(qmenu->name, menu_items[qmenu->index],
- sizeof(qmenu->name));
- return 0;
- }
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);
-
-/* ctrl_classes points to an array of u32 pointers, the last element is
- a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
- Each array must be sorted low to high and belong to the same control
- class. The array of u32 pointers must also be sorted, from low class IDs
- to high class IDs.
-
- This function returns the first ID that follows after the given ID.
- When no more controls are available 0 is returned. */
-u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
-{
- u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
- const u32 *pctrl;
-
- if (ctrl_classes == NULL)
- return 0;
-
- /* if no query is desired, then check if the ID is part of ctrl_classes */
- if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
- /* find class */
- while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
- ctrl_classes++;
- if (*ctrl_classes == NULL)
- return 0;
- pctrl = *ctrl_classes;
- /* find control ID */
- while (*pctrl && *pctrl != id) pctrl++;
- return *pctrl ? id : 0;
- }
- id &= V4L2_CTRL_ID_MASK;
- id++; /* select next control */
- /* find first class that matches (or is greater than) the class of
- the ID */
- while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
- ctrl_classes++;
- /* no more classes */
- if (*ctrl_classes == NULL)
- return 0;
- pctrl = *ctrl_classes;
- /* find first ctrl within the class that is >= ID */
- while (*pctrl && *pctrl < id) pctrl++;
- if (*pctrl)
- return *pctrl;
- /* we are at the end of the controls of the current class. */
- /* continue with next class if available */
- ctrl_classes++;
- if (*ctrl_classes == NULL)
- return 0;
- return **ctrl_classes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_next);
-
-/* I2C Helper functions */
-
-#if IS_ENABLED(CONFIG_I2C)
-
-void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
- const struct v4l2_subdev_ops *ops)
-{
- v4l2_subdev_init(sd, ops);
- sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
- /* the owner is the same as the i2c_client's driver owner */
- sd->owner = client->driver->driver.owner;
- sd->dev = &client->dev;
- /* i2c_client and v4l2_subdev point to one another */
- v4l2_set_subdevdata(sd, client);
- i2c_set_clientdata(client, sd);
- /* initialize name */
- snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
- client->driver->driver.name, i2c_adapter_id(client->adapter),
- client->addr);
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
-
-/* Load an i2c sub-device. */
-struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, struct i2c_board_info *info,
- const unsigned short *probe_addrs)
-{
- struct v4l2_subdev *sd = NULL;
- struct i2c_client *client;
-
- BUG_ON(!v4l2_dev);
-
- request_module(I2C_MODULE_PREFIX "%s", info->type);
-
- /* Create the i2c client */
- if (info->addr == 0 && probe_addrs)
- client = i2c_new_probed_device(adapter, info, probe_addrs,
- NULL);
- else
- client = i2c_new_device(adapter, info);
-
- /* Note: by loading the module first we are certain that c->driver
- will be set if the driver was found. If the module was not loaded
- first, then the i2c core tries to delay-load the module for us,
- and then c->driver is still NULL until the module is finally
- loaded. This delay-load mechanism doesn't work if other drivers
- want to use the i2c device, so explicitly loading the module
- is the best alternative. */
- if (client == NULL || client->driver == NULL)
- goto error;
-
- /* Lock the module so we can safely get the v4l2_subdev pointer */
- if (!try_module_get(client->driver->driver.owner))
- goto error;
- sd = i2c_get_clientdata(client);
-
- /* Register with the v4l2_device which increases the module's
- use count as well. */
- if (v4l2_device_register_subdev(v4l2_dev, sd))
- sd = NULL;
- /* Decrease the module use count to match the first try_module_get. */
- module_put(client->driver->driver.owner);
-
-error:
- /* If we have a client but no subdev, then something went wrong and
- we must unregister the client. */
- if (client && sd == NULL)
- i2c_unregister_device(client);
- return sd;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
-
-struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, const char *client_type,
- u8 addr, const unsigned short *probe_addrs)
-{
- struct i2c_board_info info;
-
- /* Setup the i2c board info with the device type and
- the device address. */
- memset(&info, 0, sizeof(info));
- strlcpy(info.type, client_type, sizeof(info.type));
- info.addr = addr;
-
- return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
-
-/* Return i2c client address of v4l2_subdev. */
-unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return client ? client->addr : I2C_CLIENT_END;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
-
-/* Return a list of I2C tuner addresses to probe. Use only if the tuner
- addresses are unknown. */
-const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
-{
- static const unsigned short radio_addrs[] = {
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
- 0x10,
-#endif
- 0x60,
- I2C_CLIENT_END
- };
- static const unsigned short demod_addrs[] = {
- 0x42, 0x43, 0x4a, 0x4b,
- I2C_CLIENT_END
- };
- static const unsigned short tv_addrs[] = {
- 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
- 0x60, 0x61, 0x62, 0x63, 0x64,
- I2C_CLIENT_END
- };
-
- switch (type) {
- case ADDRS_RADIO:
- return radio_addrs;
- case ADDRS_DEMOD:
- return demod_addrs;
- case ADDRS_TV:
- return tv_addrs;
- case ADDRS_TV_WITH_DEMOD:
- return tv_addrs + 4;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
-
-#endif /* defined(CONFIG_I2C) */
-
-#if defined(CONFIG_SPI)
-
-/* Load an spi sub-device. */
-
-void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
- const struct v4l2_subdev_ops *ops)
-{
- v4l2_subdev_init(sd, ops);
- sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
- /* the owner is the same as the spi_device's driver owner */
- sd->owner = spi->dev.driver->owner;
- sd->dev = &spi->dev;
- /* spi_device and v4l2_subdev point to one another */
- v4l2_set_subdevdata(sd, spi);
- spi_set_drvdata(spi, sd);
- /* initialize name */
- strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
-}
-EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
-
-struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
- struct spi_master *master, struct spi_board_info *info)
-{
- struct v4l2_subdev *sd = NULL;
- struct spi_device *spi = NULL;
-
- BUG_ON(!v4l2_dev);
-
- if (info->modalias[0])
- request_module(info->modalias);
-
- spi = spi_new_device(master, info);
-
- if (spi == NULL || spi->dev.driver == NULL)
- goto error;
-
- if (!try_module_get(spi->dev.driver->owner))
- goto error;
-
- sd = spi_get_drvdata(spi);
-
- /* Register with the v4l2_device which increases the module's
- use count as well. */
- if (v4l2_device_register_subdev(v4l2_dev, sd))
- sd = NULL;
-
- /* Decrease the module use count to match the first try_module_get. */
- module_put(spi->dev.driver->owner);
-
-error:
- /* If we have a client but no subdev, then something went wrong and
- we must unregister the client. */
- if (spi && sd == NULL)
- spi_unregister_device(spi);
-
- return sd;
-}
-EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
-
-#endif /* defined(CONFIG_SPI) */
-
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
@@ -431,33 +99,26 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
/* Bits that must be zero to be aligned */
unsigned int mask = ~((1 << align) - 1);
+ /* Clamp to aligned min and max */
+ x = clamp(x, (min + ~mask) & mask, max & mask);
+
/* Round to nearest aligned value */
if (align)
x = (x + (1 << (align - 1))) & mask;
- /* Clamp to aligned value of min and max */
- if (x < min)
- x = (min + ~mask) & mask;
- else if (x > max)
- x = max & mask;
+ return x;
+}
+
+static unsigned int clamp_roundup(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int alignment)
+{
+ x = clamp(x, min, max);
+ if (alignment)
+ x = round_up(x, alignment);
return x;
}
-/* Bound an image to have a width between wmin and wmax, and height between
- * hmin and hmax, inclusive. Additionally, the width will be a multiple of
- * 2^walign, the height will be a multiple of 2^halign, and the overall size
- * (width*height) will be a multiple of 2^salign. The image may be shrunk
- * or enlarged to fit the alignment constraints.
- *
- * The width or height maximum must not be smaller than the corresponding
- * minimum. The alignments must not be so high there are no possible image
- * sizes within the allowed bounds. wmin and hmin must be at least 1
- * (don't use 0). If you don't care about a certain alignment, specify 0,
- * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
- * you only want to adjust downward, specify a maximum that's the same as
- * the initial value.
- */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
unsigned int walign,
u32 *h, unsigned int hmin, unsigned int hmax,
@@ -495,394 +156,653 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
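A sketch of how a driver typically uses this helper in its try_fmt path (not from the patch; the bounds are arbitrary). As the comment removed above spells out, the alignment arguments are exponents of two, so walign/halign of 1 force even dimensions and 0 means no alignment:

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static void mydrv_bound_size(struct v4l2_pix_format *pix)
{
	/*
	 * Width 48..4096 on even values, height 32..2160 on even values,
	 * no additional constraint on width * height (salign = 0).
	 */
	v4l_bound_align_image(&pix->width, 48, 4096, 1,
			      &pix->height, 32, 2160, 1, 0);
}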
-/**
- * v4l_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- *
- * Compare t1 with t2 with a given margin of error for the pixelclock.
- */
-bool v4l_match_dv_timings(const struct v4l2_dv_timings *t1,
- const struct v4l2_dv_timings *t2,
- unsigned pclock_delta)
+const void *
+__v4l2_find_nearest_size_conditional(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width,
+ s32 height,
+ bool (*func)(const void *array,
+ size_t index,
+ const void *context),
+ const void *context)
{
- if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
- return false;
- if (t1->bt.width == t2->bt.width &&
- t1->bt.height == t2->bt.height &&
- t1->bt.interlaced == t2->bt.interlaced &&
- t1->bt.polarities == t2->bt.polarities &&
- t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
- t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
- t1->bt.hfrontporch == t2->bt.hfrontporch &&
- t1->bt.vfrontporch == t2->bt.vfrontporch &&
- t1->bt.vsync == t2->bt.vsync &&
- t1->bt.vbackporch == t2->bt.vbackporch &&
- (!t1->bt.interlaced ||
- (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
- t1->bt.il_vsync == t2->bt.il_vsync &&
- t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
- return true;
- return false;
+ u32 error, min_error = U32_MAX;
+ const void *best = NULL;
+ size_t i;
+
+ if (!array)
+ return NULL;
+
+ for (i = 0; i < array_size; i++, array += entry_size) {
+ const u32 *entry_width = array + width_offset;
+ const u32 *entry_height = array + height_offset;
+
+ if (func && !func(array, i, context))
+ continue;
+
+ error = abs(*entry_width - width) + abs(*entry_height - height);
+ if (error > min_error)
+ continue;
+
+ min_error = error;
+ best = array;
+ if (!error)
+ break;
+ }
+
+ return best;
}
-EXPORT_SYMBOL_GPL(v4l_match_dv_timings);
+EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size_conditional);
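Drivers rarely call the conditional helper directly; the usual entry point is the v4l2_find_nearest_size() macro in media/v4l2-common.h, which is assumed here to forward to it with the offsetof() bookkeeping filled in and no filter callback. A hypothetical frame-size lookup:

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

struct mydrv_size {
	u32 width;
	u32 height;
};

static const struct mydrv_size mydrv_sizes[] = {
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
};

static void mydrv_try_size(struct v4l2_pix_format *pix)
{
	const struct mydrv_size *sz;

	/* Picks the entry minimising |dw| + |dh| against the request. */
	sz = v4l2_find_nearest_size(mydrv_sizes, ARRAY_SIZE(mydrv_sizes),
				    width, height, pix->width, pix->height);
	pix->width = sz->width;
	pix->height = sz->height;
}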
-/*
- * CVT defines
- * Based on Coordinated Video Timings Standard
- * version 1.1 September 10, 2003
- */
+int v4l2_g_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a)
+{
+ struct v4l2_subdev_frame_interval ival = { 0 };
+ int ret;
-#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-/* Normal blanking */
-#define CVT_MIN_V_BPORCH 7 /* lines */
-#define CVT_MIN_V_PORCH_RND 3 /* lines */
-#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-
-/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
-#define CVT_CELL_GRAN 8 /* character cell granularity */
-#define CVT_M 600 /* blanking formula gradient */
-#define CVT_C 40 /* blanking formula offset */
-#define CVT_K 128 /* blanking formula scaling factor */
-#define CVT_J 20 /* blanking formula scaling factor */
-#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
-#define CVT_M_PRIME (CVT_K * CVT_M / 256)
-
-/* Reduced Blanking */
-#define CVT_RB_MIN_V_BPORCH 7 /* lines */
-#define CVT_RB_V_FPORCH 3 /* lines */
-#define CVT_RB_MIN_V_BLANK 460 /* us */
-#define CVT_RB_H_SYNC 32 /* pixels */
-#define CVT_RB_H_BPORCH 80 /* pixels */
-#define CVT_RB_H_BLANK 160 /* pixels */
-
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid CVT format. If so, then it will return true, and fmt will be filled
- * in with the found CVT timings.
- */
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_dv_timings *fmt)
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (vdev->device_caps & V4L2_CAP_READWRITE)
+ a->parm.capture.readbuffers = 2;
+ if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival);
+ if (!ret)
+ a->parm.capture.timeperframe = ival.interval;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
+
+int v4l2_s_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
- int v_fp, v_bp, h_fp, h_bp, hsync;
- int frame_width, image_height, image_width;
- bool reduced_blanking;
- unsigned pix_clk;
-
- if (vsync < 4 || vsync > 7)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- reduced_blanking = false;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- reduced_blanking = true;
+ struct v4l2_subdev_frame_interval ival = {
+ .interval = a->parm.capture.timeperframe
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ memset(&a->parm, 0, sizeof(a->parm));
+ if (vdev->device_caps & V4L2_CAP_READWRITE)
+ a->parm.capture.readbuffers = 2;
else
- return false;
+ a->parm.capture.readbuffers = 0;
+
+ if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival);
+ if (!ret)
+ a->parm.capture.timeperframe = ival.interval;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
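Bridge drivers that delegate frame-interval handling to a sensor sub-device can wire these helpers straight into their ioctl ops. A hedged sketch; struct mydrv and its vdev/sensor_sd fields are assumptions of this example:

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

struct mydrv {
	struct video_device *vdev;
	struct v4l2_subdev *sensor_sd;
};

static int mydrv_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct mydrv *priv = video_drvdata(file);

	return v4l2_g_parm_cap(priv->vdev, priv->sensor_sd, a);
}

static int mydrv_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct mydrv *priv = video_drvdata(file);

	return v4l2_s_parm_cap(priv->vdev, priv->sensor_sd, a);
}

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	/* ... other handlers ... */
	.vidioc_g_parm = mydrv_g_parm,
	.vidioc_s_parm = mydrv_s_parm,
};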
- /* Vertical */
- if (reduced_blanking) {
- v_fp = CVT_RB_V_FPORCH;
- v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 999999) / 1000000;
- v_bp -= vsync + v_fp;
+const struct v4l2_format_info *v4l2_format_info(u32 format)
+{
+ static const struct v4l2_format_info formats[] = {
+ /* RGB formats */
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565X, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* YUV packed formats */
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+ { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+
+ /* YUV planar formats */
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV15, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV20, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* Tiled YUV formats */
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
+ { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ /* YUV planar formats, non contiguous variant */
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ /* Tiled YUV formats, non contiguous variant */
+ { .format = V4L2_PIX_FMT_NV12MT, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 64, 32, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+ { .format = V4L2_PIX_FMT_NV12MT_16X16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 16, 8, 0, 0 }},
+
+ /* Bayer RGB formats */
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 5, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 2, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB14P, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 7, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB16, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* Renesas Camera Data Receiver Unit formats, bayer order agnostic */
+ { .format = V4L2_PIX_FMT_RAW_CRU10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 6, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 5, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU14, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 4, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RAW_CRU20, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 3, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ };
+ unsigned int i;
- if (v_bp < CVT_RB_MIN_V_BPORCH)
- v_bp = CVT_RB_MIN_V_BPORCH;
- } else {
- v_fp = CVT_MIN_V_PORCH_RND;
- v_bp = (CVT_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
+ for (i = 0; i < ARRAY_SIZE(formats); ++i)
+ if (formats[i].format == format)
+ return &formats[i];
+ return NULL;
+}
+EXPORT_SYMBOL(v4l2_format_info);
- if (v_bp < CVT_MIN_V_BPORCH)
- v_bp = CVT_MIN_V_BPORCH;
- }
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
-
- /* Aspect ratio based on vsync */
- switch (vsync) {
- case 4:
- image_width = (image_height * 4) / 3;
- break;
- case 5:
- image_width = (image_height * 16) / 9;
- break;
- case 6:
- image_width = (image_height * 16) / 10;
- break;
- case 7:
- /* special case */
- if (image_height == 1024)
- image_width = (image_height * 5) / 4;
- else if (image_height == 768)
- image_width = (image_height * 15) / 9;
- else
- return false;
- break;
- default:
- return false;
+static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_w[plane])
+ return 1;
+ return info->block_w[plane];
+}
+
+static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_h[plane])
+ return 1;
+ return info->block_h[plane];
+}
+
+static inline unsigned int v4l2_format_plane_stride(const struct v4l2_format_info *info, int plane,
+ unsigned int width)
+{
+ unsigned int hdiv = plane ? info->hdiv : 1;
+ unsigned int aligned_width =
+ ALIGN(width, v4l2_format_block_width(info, plane));
+
+ return DIV_ROUND_UP(aligned_width, hdiv) *
+ info->bpp[plane] / info->bpp_div[plane];
+}
+
+static inline unsigned int v4l2_format_plane_height(const struct v4l2_format_info *info, int plane,
+ unsigned int height)
+{
+ unsigned int vdiv = plane ? info->vdiv : 1;
+ unsigned int aligned_height =
+ ALIGN(height, v4l2_format_block_height(info, plane));
+
+ return DIV_ROUND_UP(aligned_height, vdiv);
+}
+
+static inline unsigned int v4l2_format_plane_size(const struct v4l2_format_info *info, int plane,
+ unsigned int width, unsigned int height)
+{
+ return v4l2_format_plane_stride(info, plane, width) *
+ v4l2_format_plane_height(info, plane, height);
+}
+
+void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
+ const struct v4l2_frmsize_stepwise *frmsize)
+{
+ if (!frmsize)
+ return;
+
+ /*
+ * Clamp width/height to meet min/max constraints and round it up to
+ * macroblock alignment.
+ */
+ *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
+ frmsize->step_width);
+ *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
+ frmsize->step_height);
+}
+EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
+
+int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
+ u32 pixelformat, u32 width, u32 height)
+{
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->num_planes = info->mem_planes;
+
+ if (info->mem_planes == 1) {
+ plane = &pixfmt->plane_fmt[0];
+ plane->bytesperline = v4l2_format_plane_stride(info, 0, width);
+ plane->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++)
+ plane->sizeimage +=
+ v4l2_format_plane_size(info, i, width, height);
+ } else {
+ for (i = 0; i < info->comp_planes; i++) {
+ plane = &pixfmt->plane_fmt[i];
+ plane->bytesperline =
+ v4l2_format_plane_stride(info, i, width);
+ plane->sizeimage = plane->bytesperline *
+ v4l2_format_plane_height(info, i, height);
+ }
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
+
+int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
+ u32 width, u32 height)
+{
+ const struct v4l2_format_info *info;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+	/* The single-planar API cannot be used for multi-planar formats. */
+ if (info->mem_planes > 1)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->bytesperline = v4l2_format_plane_stride(info, 0, width);
+ pixfmt->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++)
+ pixfmt->sizeimage +=
+ v4l2_format_plane_size(info, i, width, height);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
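For reference, a minimal sketch (not part of the patch) of filling a single-planar NV12 format at 1920x1080 with the helper above; the expected values assume NV12's upstream format-table entry (one memory plane, two component planes, hdiv = vdiv = 2). v4l2_fill_pixfmt_mp() does the same per memory plane for multi-planar formats.

	struct v4l2_pix_format pix = {};
	int ret;

	ret = v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_NV12, 1920, 1080);
	if (ret)
		return ret;

	/*
	 * pix.bytesperline == 1920 and
	 * pix.sizeimage == 1920 * 1080 + 1920 * 540 == 3110400 (1.5 bytes/pixel).
	 */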
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+static s64 v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
+ unsigned int mul, unsigned int div)
+{
+ struct v4l2_ctrl *ctrl;
+ s64 freq;
- image_width = image_width & ~7;
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
+ if (ctrl) {
+ struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
+ int ret;
- /* Horizontal */
- if (reduced_blanking) {
- pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+ qm.index = v4l2_ctrl_g_ctrl(ctrl);
- h_bp = CVT_RB_H_BPORCH;
- hsync = CVT_RB_H_SYNC;
- h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+ ret = v4l2_querymenu(handler, &qm);
+ if (ret)
+ return -ENOENT;
- frame_width = image_width + CVT_RB_H_BLANK;
+ freq = qm.value;
} else {
- int h_blank;
- unsigned ideal_duty_cycle = CVT_C_PRIME - (CVT_M_PRIME * 1000) / hfreq;
+ if (!mul || !div)
+ return -ENOENT;
- h_blank = (image_width * ideal_duty_cycle + (100 - ideal_duty_cycle) / 2) /
- (100 - ideal_duty_cycle);
- h_blank = h_blank - h_blank % (2 * CVT_CELL_GRAN);
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl)
+ return -ENOENT;
- if (h_blank * 100 / image_width < 20) {
- h_blank = image_width / 5;
- h_blank = (h_blank + 0x7) & ~0x7;
- }
+ freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
+
+ pr_warn_once("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
+ __func__);
+ pr_warn_once("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
+ __func__);
+ }
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+ return freq > 0 ? freq : -EINVAL;
+}
- h_bp = h_blank / 2;
- frame_width = image_width + h_blank;
+s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul,
+ unsigned int div)
+{
+ struct v4l2_mbus_config mbus_config = {};
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index,
+ &mbus_config);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (mbus_config.link_freq)
+ return mbus_config.link_freq;
+
+ /*
+ * Fall back to using the link frequency control if the media bus config
+ * doesn't provide a link frequency.
+ */
+ return v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
+}
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
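A typical use in a CSI-2 receiver is to derive the pixel rate from the link frequency; this is only a sketch, and remote_pad, bpp (bits per sample of the media bus code) and lanes (active data lanes) are placeholders:

	s64 link_freq;
	u64 pixel_rate;

	link_freq = v4l2_get_link_freq(remote_pad, bpp, 2 * lanes);
	if (link_freq < 0)
		return link_freq;

	/* DDR: each lane carries two bits per link clock cycle. */
	pixel_rate = div_u64(link_freq * 2 * lanes, bpp);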
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % CVT_CELL_GRAN;
- h_fp = h_blank - hsync - h_bp;
+int v4l2_get_active_data_lanes(const struct media_pad *pad,
+ unsigned int max_data_lanes)
+{
+ struct v4l2_mbus_config mbus_config = {};
+ struct v4l2_subdev *sd;
+ unsigned int lanes;
+ int ret;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index,
+ &mbus_config);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* This relies on the mbus_config being zeroed at init time */
+ lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
+ if (!lanes)
+ return max_data_lanes;
+
+ if (lanes > max_data_lanes) {
+		dev_dbg(sd->dev, "Active data lanes (%u) exceed max (%u)\n",
+ lanes, max_data_lanes);
+ return -EINVAL;
}
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_CVT;
- if (reduced_blanking)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
+ return lanes;
}
-EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+EXPORT_SYMBOL_GPL(v4l2_get_active_data_lanes);
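Correspondingly, a receiver that supports lane reconfiguration could query the transmitter as sketched below (remote_pad and the 4-lane maximum are placeholders, not from the patch):

	int lanes = v4l2_get_active_data_lanes(remote_pad, 4);

	if (lanes < 0)
		return lanes;
	/* lanes is now the transmitter's value, or 4 if it reported none. */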
+#endif
/*
- * GTF defines
- * Based on Generalized Timing Formula Standard
- * Version 1.1 September 2, 1999
+ * Simplify a fraction using a simple continued fraction decomposition. The
+ * idea here is to convert fractions such as 333333/10000000 to 1/30 using
+ * 32-bit arithmetic only. The algorithm is not perfect and relies upon two
+ * arbitrary parameters to remove non-significant terms from the simple
+ * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
+ * respectively seems to give nice results.
*/
-
-#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-#define GTF_V_FP 1 /* vertical front porch (lines) */
-#define GTF_CELL_GRAN 8 /* character cell granularity */
-
-/* Default */
-#define GTF_D_M 600 /* blanking formula gradient */
-#define GTF_D_C 40 /* blanking formula offset */
-#define GTF_D_K 128 /* blanking formula scaling factor */
-#define GTF_D_J 20 /* blanking formula scaling factor */
-#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
-#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
-
-/* Secondary */
-#define GTF_S_M 3600 /* blanking formula gradient */
-#define GTF_S_C 40 /* blanking formula offset */
-#define GTF_S_K 128 /* blanking formula scaling factor */
-#define GTF_S_J 35 /* blanking formula scaling factor */
-#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
-#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
-
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @aspect - preferred aspect ratio. GTF has no method of determining the
- * aspect ratio in order to derive the image width from the
- * image height, so it has to be passed explicitly. Usually
- * the native screen aspect ratio is used for this. If it
- * is not filled in correctly, then 16:9 will be assumed.
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid GTF format. If so, then it will return true, and fmt will be filled
- * in with the found GTF timings.
- */
-bool v4l2_detect_gtf(unsigned frame_height,
- unsigned hfreq,
- unsigned vsync,
- u32 polarities,
- struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt)
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold)
{
- int pix_clk;
- int v_fp, v_bp, h_fp, hsync;
- int frame_width, image_height, image_width;
- bool default_gtf;
- int h_blank;
-
- if (vsync != 3)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- default_gtf = true;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- default_gtf = false;
- else
- return false;
+ u32 *an;
+ u32 x, y, r;
+ unsigned int i, n;
- /* Vertical */
- v_fp = GTF_V_FP;
- v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+ an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
+ if (an == NULL)
+ return;
+
+ /*
+ * Convert the fraction to a simple continued fraction. See
+ * https://en.wikipedia.org/wiki/Continued_fraction
+ * Stop if the current term is bigger than or equal to the given
+ * threshold.
+ */
+ x = *numerator;
+ y = *denominator;
+
+ for (n = 0; n < n_terms && y != 0; ++n) {
+ an[n] = x / y;
+ if (an[n] >= threshold) {
+ if (n < 2)
+ n++;
+ break;
+ }
- if (aspect.numerator == 0 || aspect.denominator == 0) {
- aspect.numerator = 16;
- aspect.denominator = 9;
+ r = x - an[n] * y;
+ x = y;
+ y = r;
}
- image_width = ((image_height * aspect.numerator) / aspect.denominator);
-
- /* Horizontal */
- if (default_gtf)
- h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
- (image_width * GTF_D_M_PRIME * 1000) +
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
- else
- h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
- (image_width * GTF_S_M_PRIME * 1000) +
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
-
- h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
- frame_width = image_width + h_blank;
-
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
-
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % GTF_CELL_GRAN;
-
- h_fp = h_blank / 2 - hsync;
-
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_GTF;
- if (!default_gtf)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
+
+ /* Expand the simple continued fraction back to an integer fraction. */
+ x = 0;
+ y = 1;
+
+ for (i = n; i > 0; --i) {
+ r = y;
+ y = an[i-1] * y + x;
+ x = r;
+ }
+
+ *numerator = y;
+ *denominator = x;
+ kfree(an);
}
-EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
+EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
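A worked example of the 30 fps case mentioned in the comment above (illustration only):

	u32 num = 333333, den = 10000000;

	v4l2_simplify_fraction(&num, &den, 8, 333);
	/* The continued fraction [0; 30, ...] is truncated: num == 1, den == 30. */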
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
- * 0x15 and 0x16 from the EDID.
- * @hor_landscape - byte 0x15 from the EDID.
- * @vert_portrait - byte 0x16 from the EDID.
- *
- * Determines the aspect ratio from the EDID.
- * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
- * "Horizontal and Vertical Screen Size or Aspect Ratio"
+/*
+ * Convert a fraction to a frame interval in 100ns multiples. The idea here is
+ * to compute numerator / denominator * 10000000 using 32-bit fixed-point
+ * arithmetic only.
*/
-struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
{
- struct v4l2_fract aspect = { 16, 9 };
- u32 tmp;
- u8 ratio;
-
- /* Nothing filled in, fallback to 16:9 */
- if (!hor_landscape && !vert_portrait)
- return aspect;
- /* Both filled in, so they are interpreted as the screen size in cm */
- if (hor_landscape && vert_portrait) {
- aspect.numerator = hor_landscape;
- aspect.denominator = vert_portrait;
- return aspect;
- }
- /* Only one is filled in, so interpret them as a ratio:
- (val + 99) / 100 */
- ratio = hor_landscape | vert_portrait;
- /* Change some rounded values into the exact aspect ratio */
- if (ratio == 79) {
- aspect.numerator = 16;
- aspect.denominator = 9;
- } else if (ratio == 34) {
- aspect.numerator = 4;
- aspect.numerator = 3;
- } else if (ratio == 68) {
- aspect.numerator = 15;
- aspect.numerator = 9;
- } else {
- aspect.numerator = hor_landscape + 99;
- aspect.denominator = 100;
+ u32 multiplier;
+
+ /* Saturate the result if the operation would overflow. */
+ if (denominator == 0 ||
+ numerator/denominator >= ((u32)-1)/10000000)
+ return (u32)-1;
+
+ /*
+ * Divide both the denominator and the multiplier by two until
+ * numerator * multiplier doesn't overflow. If anyone knows a better
+ * algorithm please let me know.
+ */
+ multiplier = 10000000;
+ while (numerator > ((u32)-1)/multiplier) {
+ multiplier /= 2;
+ denominator /= 2;
}
- if (hor_landscape)
- return aspect;
- /* The aspect ratio is for portrait, so swap numerator and denominator */
- tmp = aspect.denominator;
- aspect.denominator = aspect.numerator;
- aspect.numerator = tmp;
- return aspect;
+
+ return denominator ? numerator * multiplier / denominator : 0;
}
-EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
+EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
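Continuing the same example (illustration only), converting 1/30 s back to a frame interval expressed in 100 ns units:

	u32 interval = v4l2_fraction_to_interval(1, 30);

	/* 1 / 30 * 10000000 == 333333, i.e. ~33.3 ms. */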
-const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
- const struct v4l2_discrete_probe *probe,
- s32 width, s32 height)
+int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
+ unsigned int num_of_fw_link_freqs,
+ const s64 *driver_link_freqs,
+ unsigned int num_of_driver_link_freqs,
+ unsigned long *bitmap)
{
- int i;
- u32 error, min_error = UINT_MAX;
- const struct v4l2_frmsize_discrete *size, *best = NULL;
+ unsigned int i;
- if (!probe)
- return best;
+ *bitmap = 0;
- for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
- error = abs(size->width - width) + abs(size->height - height);
- if (error < min_error) {
- min_error = error;
- best = size;
- }
- if (!error)
+ if (!num_of_fw_link_freqs) {
+ dev_err(dev, "no link frequencies in firmware\n");
+ return -ENODATA;
+ }
+
+ for (i = 0; i < num_of_fw_link_freqs; i++) {
+ unsigned int j;
+
+ for (j = 0; j < num_of_driver_link_freqs; j++) {
+ if (fw_link_freqs[i] != driver_link_freqs[j])
+ continue;
+
+ dev_dbg(dev, "enabling link frequency %lld Hz\n",
+ driver_link_freqs[j]);
+ *bitmap |= BIT(j);
break;
+ }
}
- return best;
+ if (!*bitmap) {
+ dev_err(dev, "no matching link frequencies found\n");
+
+ dev_dbg(dev, "specified in firmware:\n");
+ for (i = 0; i < num_of_fw_link_freqs; i++)
+ dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]);
+
+ dev_dbg(dev, "driver supported:\n");
+ for (i = 0; i < num_of_driver_link_freqs; i++)
+ dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]);
+
+ return -ENOENT;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
+EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
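A sensor driver would typically call this from probe after parsing its endpoint. In the sketch below the driver frequency table and the bus_cfg variable are hypothetical; a struct v4l2_fwnode_endpoint filled by v4l2_fwnode_endpoint_alloc_parse() is the usual source of the firmware list:

	static const s64 sensor_link_freqs[] = { 456000000, 297000000 };
	unsigned long freq_bitmap;
	int ret;

	ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
				       bus_cfg.nr_of_link_frequencies,
				       sensor_link_freqs,
				       ARRAY_SIZE(sensor_link_freqs),
				       &freq_bitmap);
	if (ret)
		return ret;
	/* Set bits in freq_bitmap index the usable entries of sensor_link_freqs. */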
-void v4l2_get_timestamp(struct timeval *tv)
+struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id,
+ bool legacy, bool fixed_rate,
+ unsigned long clk_rate)
{
- struct timespec ts;
+ bool of_node = is_of_node(dev_fwnode(dev));
+ const char *clk_id __free(kfree) = NULL;
+ struct clk_hw *clk_hw;
+ struct clk *clk;
+ u32 rate = clk_rate;
+ int ret = 0;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ /*
+ * If the caller didn't request a fixed rate, retrieve it from the
+ * clock-frequency property. -EINVAL indicates the property is absent,
+ * and is not a failure. Other errors, or success with a clock-frequency
+ * value of 0, are hard failures.
+ */
+ if (!fixed_rate || !clk_rate) {
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if ((ret && ret != -EINVAL) || (!ret && !rate))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (clk) {
+ /*
+ * On non-OF platforms, or when legacy behaviour is requested,
+ * set the clock rate if a rate has been specified by the caller
+ * or by the clock-frequency property.
+ */
+ if (rate && (!of_node || legacy)) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "Failed to set clock rate: %u\n",
+ rate);
+ return ERR_PTR(ret);
+ }
+ }
+ return clk;
+ }
+
+ /*
+ * Register a dummy fixed clock on non-OF platforms or when legacy
+	 * behaviour is requested. This requires the common clock framework.
+ */
+ if (!IS_ENABLED(CONFIG_COMMON_CLK) || (of_node && !legacy))
+ return ERR_PTR(-ENOENT);
+
+ /* We need a rate to create a clock. */
+ if (ret)
+ return ERR_PTR(ret == -EINVAL ? -EPROBE_DEFER : ret);
+
+ if (!id) {
+ clk_id = kasprintf(GFP_KERNEL, "clk-%s", dev_name(dev));
+ if (!clk_id)
+ return ERR_PTR(-ENOMEM);
+ id = clk_id;
+ }
+
+ clk_hw = devm_clk_hw_register_fixed_rate(dev, id, NULL, 0, rate);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
- ktime_get_ts(&ts);
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ return clk_hw->clk;
}
-EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
+EXPORT_SYMBOL_GPL(__devm_v4l2_sensor_clk_get);
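A minimal sketch of a sensor probe using the helper above, requesting no legacy behaviour and no fixed rate; drivers would normally go through whatever convenience wrapper the header provides, the direct call here only mirrors the exported signature:

	struct clk *xclk;

	xclk = __devm_v4l2_sensor_clk_get(dev, NULL, false, false, 0);
	if (IS_ERR(xclk))
		return PTR_ERR(xclk);

	/* clk_get_rate(xclk) now reports the external clock frequency. */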
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8f7a6a454a4c..2c88e1175a10 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
* Separated from fs stuff by Arnd Bergmann <arnd@arndb.de>
@@ -7,7 +8,7 @@
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
* Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
* Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
- * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
@@ -18,132 +19,75 @@
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
-static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret = -ENOIOCTLCMD;
-
- if (file->f_op->unlocked_ioctl)
- ret = file->f_op->unlocked_ioctl(file, cmd, arg);
-
- return ret;
-}
-
-
-struct v4l2_clip32 {
- struct v4l2_rect c;
- compat_caddr_t next;
-};
+/*
+ * Per-ioctl data copy handlers.
+ *
+ * These come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine,
+ * where "v4l2_foo" is the name of the V4L2 struct.
+ *
+ * They basically get two __user pointers: one to a 32-bit struct that
+ * came from the userspace call and one to a 64-bit struct, also allocated
+ * in userspace, but filled internally by do_video_ioctl().
+ *
+ * For ioctls that have pointers inside them, the functions will also
+ * receive an ancillary buffer with extra space, used to pass extra
+ * data to the routine.
+ */
struct v4l2_window32 {
struct v4l2_rect w;
- __u32 field; /* enum v4l2_field */
+ __u32 field; /* enum v4l2_field */
__u32 chromakey;
- compat_caddr_t clips; /* actually struct v4l2_clip32 * */
- __u32 clipcount;
- compat_caddr_t bitmap;
+ compat_caddr_t clips; /* always NULL */
+ __u32 clipcount; /* always 0 */
+ compat_caddr_t bitmap; /* always NULL */
+ __u8 global_alpha;
};
-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int get_v4l2_window32(struct v4l2_window *p64,
+ struct v4l2_window32 __user *p32)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
- copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->chromakey, &up->chromakey) ||
- get_user(kp->clipcount, &up->clipcount))
- return -EFAULT;
- if (kp->clipcount > 2048)
- return -EINVAL;
- if (kp->clipcount) {
- struct v4l2_clip32 __user *uclips;
- struct v4l2_clip __user *kclips;
- int n = kp->clipcount;
- compat_caddr_t p;
-
- if (get_user(p, &up->clips))
- return -EFAULT;
- uclips = compat_ptr(p);
- kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
- kp->clips = kclips;
- while (--n >= 0) {
- if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
- return -EFAULT;
- if (put_user(n ? kclips + 1 : NULL, &kclips->next))
- return -EFAULT;
- uclips += 1;
- kclips += 1;
- }
- } else
- kp->clips = NULL;
- return 0;
-}
+ struct v4l2_window32 w32;
-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
-{
- if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->chromakey, &up->chromakey) ||
- put_user(kp->clipcount, &up->clipcount))
- return -EFAULT;
- return 0;
-}
-
-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
- return -EFAULT;
- return 0;
-}
-
-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
- struct v4l2_pix_format_mplane __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
- return -EFAULT;
- return 0;
-}
-
-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ if (copy_from_user(&w32, p32, sizeof(w32)))
return -EFAULT;
- return 0;
-}
-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
- struct v4l2_pix_format_mplane __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
- return -EFAULT;
- return 0;
-}
+ *p64 = (struct v4l2_window) {
+ .w = w32.w,
+ .field = w32.field,
+ .chromakey = w32.chromakey,
+ .clips = NULL,
+ .clipcount = 0,
+ .bitmap = NULL,
+ .global_alpha = w32.global_alpha,
+ };
-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
- return -EFAULT;
return 0;
}
-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static int put_v4l2_window32(struct v4l2_window *p64,
+ struct v4l2_window32 __user *p32)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
- return -EFAULT;
- return 0;
-}
+ struct v4l2_window32 w32;
+
+ memset(&w32, 0, sizeof(w32));
+ w32 = (struct v4l2_window32) {
+ .w = p64->w,
+ .field = p64->field,
+ .chromakey = p64->chromakey,
+ .clips = 0,
+ .clipcount = 0,
+ .bitmap = 0,
+ .global_alpha = p64->global_alpha,
+ };
-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
+ if (copy_to_user(p32, &w32, sizeof(w32)))
return -EFAULT;
- return 0;
-}
-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
- return -EFAULT;
return 0;
}
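To make the get/put pairing described at the top of this file concrete, here is a sketch for a hypothetical struct v4l2_foo carrying one value and one user pointer; struct v4l2_foo, struct v4l2_foo32 and the handlers are illustrative only and do not exist in the tree:

	struct v4l2_foo {		/* native 64-bit layout */
		u32 value;
		void __user *data;
	};

	struct v4l2_foo32 {		/* what 32-bit userspace passes */
		__u32 value;
		compat_caddr_t data;	/* 32-bit user pointer */
	};

	static int get_v4l2_foo32(struct v4l2_foo *p64, struct v4l2_foo32 __user *p32)
	{
		struct v4l2_foo32 f32;

		if (copy_from_user(&f32, p32, sizeof(f32)))
			return -EFAULT;
		p64->value = f32.value;
		p64->data = compat_ptr(f32.data);	/* widen the 32-bit pointer */
		return 0;
	}

	static int put_v4l2_foo32(struct v4l2_foo *p64, struct v4l2_foo32 __user *p32)
	{
		struct v4l2_foo32 f32 = {
			.value = p64->value,
			.data = (uintptr_t)p64->data,	/* narrow it back for userspace */
		};

		return copy_to_user(p32, &f32, sizeof(f32)) ? -EFAULT : 0;
	}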
@@ -155,6 +99,8 @@ struct v4l2_format32 {
struct v4l2_window32 win;
struct v4l2_vbi_format vbi;
struct v4l2_sliced_vbi_format sliced;
+ struct v4l2_sdr_format sdr;
+ struct v4l2_meta_format meta;
__u8 raw_data[200]; /* user-defined */
} fmt;
};
@@ -166,6 +112,13 @@ struct v4l2_format32 {
* return: number of created buffers
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and
+ *	    is configured for MMAP streaming I/O).
+ * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag
+ *	    is set, this field indicates the maximum possible number of buffers
+ * for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -173,121 +126,141 @@ struct v4l2_create_buffers32 {
__u32 count;
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 flags;
+ __u32 max_num_buffers;
+ __u32 reserved[5];
};
-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_format32(struct v4l2_format *p64,
+ struct v4l2_format32 __user *p32)
{
- switch (kp->type) {
+ if (get_user(p64->type, &p32->type))
+ return -EFAULT;
+
+ switch (p64->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ return copy_from_user(&p64->fmt.pix, &p32->fmt.pix,
+ sizeof(p64->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
- &up->fmt.pix_mp);
+ return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp,
+ sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi,
+ sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced,
+ sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr,
+ sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return copy_from_user(&p64->fmt.meta, &p32->fmt.meta,
+ sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
- printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
- kp->type);
return -EINVAL;
}
}
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
-{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
- get_user(kp->type, &up->type))
- return -EFAULT;
- return __get_v4l2_format32(kp, up);
-}
-
-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int get_v4l2_create32(struct v4l2_create_buffers *p64,
+ struct v4l2_create_buffers32 __user *p32)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
- return __get_v4l2_format32(&kp->format, &up->format);
+ if (copy_from_user(p64, p32,
+ offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
+ if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
+ return -EFAULT;
+ if (copy_from_user(&p64->max_num_buffers, &p32->max_num_buffers,
+ sizeof(p32->max_num_buffers)))
+ return -EFAULT;
+ return get_v4l2_format32(&p64->format, &p32->format);
}
-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_format32(struct v4l2_format *p64,
+ struct v4l2_format32 __user *p32)
{
- switch (kp->type) {
+ switch (p64->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ return copy_to_user(&p32->fmt.pix, &p64->fmt.pix,
+ sizeof(p64->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
- &up->fmt.pix_mp);
+ return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp,
+ sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi,
+ sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced,
+ sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr,
+ sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return copy_to_user(&p32->fmt.meta, &p64->fmt.meta,
+ sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
- printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
- kp->type);
return -EINVAL;
}
}
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_create32(struct v4l2_create_buffers *p64,
+ struct v4l2_create_buffers32 __user *p32)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
- put_user(kp->type, &up->type))
+ if (copy_to_user(p32, p64,
+ offsetof(struct v4l2_create_buffers32, format)) ||
+ put_user(p64->capabilities, &p32->capabilities) ||
+ put_user(p64->flags, &p32->flags) ||
+ put_user(p64->max_num_buffers, &p32->max_num_buffers) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
- return __put_v4l2_format32(kp, up);
-}
-
-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
-{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
- return __put_v4l2_format32(&kp->format, &up->format);
+ return put_v4l2_format32(&p64->format, &p32->format);
}
struct v4l2_standard32 {
__u32 index;
- __u32 id[2]; /* __u64 would get the alignment wrong */
+ compat_u64 id;
__u8 name[24];
struct v4l2_fract frameperiod; /* Frames, not fields */
__u32 framelines;
__u32 reserved[4];
};
-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int get_v4l2_standard32(struct v4l2_standard *p64,
+ struct v4l2_standard32 __user *p32)
{
/* other fields are not set by the user, nor used by the driver */
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
- get_user(kp->index, &up->index))
- return -EFAULT;
- return 0;
+ return get_user(p64->index, &p32->index);
}
-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int put_v4l2_standard32(struct v4l2_standard *p64,
+ struct v4l2_standard32 __user *p32)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
- put_user(kp->index, &up->index) ||
- copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
- copy_to_user(up->name, kp->name, 24) ||
- copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
- put_user(kp->framelines, &up->framelines) ||
- copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
- return -EFAULT;
+ if (put_user(p64->index, &p32->index) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->name, p64->name, sizeof(p32->name)) ||
+ copy_to_user(&p32->frameperiod, &p64->frameperiod,
+ sizeof(p32->frameperiod)) ||
+ put_user(p64->framelines, &p32->framelines) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
return 0;
}
@@ -303,13 +276,44 @@ struct v4l2_plane32 {
__u32 reserved[11];
};
+/*
+ * This is correct for all architectures including i386, but not x32,
+ * which has different alignment requirements for timestamp
+ */
struct v4l2_buffer32 {
__u32 index;
__u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
__u32 field; /* enum v4l2_field */
- struct compat_timeval timestamp;
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_usec;
+ } timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory; /* enum v4l2_memory */
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ compat_caddr_t planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __s32 request_fd;
+};
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_buffer32_time32 {
+ __u32 index;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field; /* enum v4l2_field */
+ struct old_timeval32 timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
@@ -323,248 +327,292 @@ struct v4l2_buffer32 {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ __s32 request_fd;
};
+#endif
-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
- enum v4l2_memory memory)
+static int get_v4l2_plane32(struct v4l2_plane *p64,
+ struct v4l2_plane32 __user *p32,
+ enum v4l2_memory memory)
{
- void __user *up_pln;
- compat_long_t p;
+ struct v4l2_plane32 plane32;
+ typeof(p64->m) m = {};
- if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
- copy_in_user(&up->data_offset, &up32->data_offset,
- sizeof(__u32)))
+ if (copy_from_user(&plane32, p32, sizeof(plane32)))
return -EFAULT;
- if (memory == V4L2_MEMORY_USERPTR) {
- if (get_user(p, &up32->m.userptr))
- return -EFAULT;
- up_pln = compat_ptr(p);
- if (put_user((unsigned long)up_pln, &up->m.userptr))
- return -EFAULT;
- } else if (memory == V4L2_MEMORY_DMABUF) {
- if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
- return -EFAULT;
- } else {
- if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
- sizeof(__u32)))
- return -EFAULT;
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ m.mem_offset = plane32.m.mem_offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ m.userptr = (unsigned long)compat_ptr(plane32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ m.fd = plane32.m.fd;
+ break;
}
+ memset(p64, 0, sizeof(*p64));
+ *p64 = (struct v4l2_plane) {
+ .bytesused = plane32.bytesused,
+ .length = plane32.length,
+ .m = m,
+ .data_offset = plane32.data_offset,
+ };
+
return 0;
}
-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
- enum v4l2_memory memory)
+static int put_v4l2_plane32(struct v4l2_plane *p64,
+ struct v4l2_plane32 __user *p32,
+ enum v4l2_memory memory)
{
- if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
- copy_in_user(&up32->data_offset, &up->data_offset,
- sizeof(__u32)))
+ struct v4l2_plane32 plane32;
+
+ memset(&plane32, 0, sizeof(plane32));
+ plane32 = (struct v4l2_plane32) {
+ .bytesused = p64->bytesused,
+ .length = p64->length,
+ .data_offset = p64->data_offset,
+ };
+
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ plane32.m.mem_offset = p64->m.mem_offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ plane32.m.userptr = (uintptr_t)(p64->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ plane32.m.fd = p64->m.fd;
+ break;
+ }
+
+ if (copy_to_user(p32, &plane32, sizeof(plane32)))
return -EFAULT;
- /* For MMAP, driver might've set up the offset, so copy it back.
- * USERPTR stays the same (was userspace-provided), so no copying. */
- if (memory == V4L2_MEMORY_MMAP)
- if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
- sizeof(__u32)))
- return -EFAULT;
- /* For DMABUF, driver might've set up the fd, so copy it back. */
- if (memory == V4L2_MEMORY_DMABUF)
- if (copy_in_user(&up32->m.fd, &up->m.fd,
- sizeof(int)))
- return -EFAULT;
+ return 0;
+}
+
+static int get_v4l2_buffer32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32 __user *arg)
+{
+ struct v4l2_buffer32 vb32;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ memset(vb, 0, sizeof(*vb));
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.offset = vb32.m.offset,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb->m.offset = vb32.m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb->m.fd = vb32.m.fd;
+ break;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb->m.planes = (void __force *)
+ compat_ptr(vb32.m.planes);
return 0;
}
-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+#ifdef CONFIG_COMPAT_32BIT_TIME
+static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32_time32 __user *arg)
{
- struct v4l2_plane32 __user *uplane32;
- struct v4l2_plane __user *uplane;
- compat_caddr_t p;
- int num_planes;
- int ret;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
- get_user(kp->index, &up->index) ||
- get_user(kp->type, &up->type) ||
- get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory))
- return -EFAULT;
-
- if (V4L2_TYPE_IS_OUTPUT(kp->type))
- if (get_user(kp->bytesused, &up->bytesused) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- get_user(kp->timestamp.tv_usec,
- &up->timestamp.tv_usec))
- return -EFAULT;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- if (get_user(kp->length, &up->length))
- return -EFAULT;
-
- num_planes = kp->length;
- if (num_planes == 0) {
- kp->m.planes = NULL;
- /* num_planes == 0 is legal, e.g. when userspace doesn't
- * need planes array on DQBUF*/
- return 0;
- }
+ struct v4l2_buffer32_time32 vb32;
- if (get_user(p, &up->m.planes))
- return -EFAULT;
-
- uplane32 = compat_ptr(p);
- if (!access_ok(VERIFY_READ, uplane32,
- num_planes * sizeof(struct v4l2_plane32)))
- return -EFAULT;
-
- /* We don't really care if userspace decides to kill itself
- * by passing a very big num_planes value */
- uplane = compat_alloc_user_space(num_planes *
- sizeof(struct v4l2_plane));
- kp->m.planes = uplane;
-
- while (--num_planes >= 0) {
- ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
- if (ret)
- return ret;
- ++uplane;
- ++uplane32;
- }
- } else {
- switch (kp->memory) {
- case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- case V4L2_MEMORY_USERPTR:
- {
- compat_long_t tmp;
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
- return -EFAULT;
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.offset = vb32.m.offset,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb->m.offset = vb32.m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb->m.fd = vb32.m.fd;
+ break;
+ }
- kp->m.userptr = (unsigned long)compat_ptr(tmp);
- }
- break;
- case V4L2_MEMORY_OVERLAY:
- if (get_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- case V4L2_MEMORY_DMABUF:
- if (get_user(kp->m.fd, &up->m.fd))
- return -EFAULT;
- break;
- }
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb->m.planes = (void __force *)
+ compat_ptr(vb32.m.planes);
+
+ return 0;
+}
+#endif
+
+static int put_v4l2_buffer32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32 __user *arg)
+{
+ struct v4l2_buffer32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+ vb32 = (struct v4l2_buffer32) {
+ .index = vb->index,
+ .type = vb->type,
+ .bytesused = vb->bytesused,
+ .flags = vb->flags,
+ .field = vb->field,
+ .timestamp.tv_sec = vb->timestamp.tv_sec,
+ .timestamp.tv_usec = vb->timestamp.tv_usec,
+ .timecode = vb->timecode,
+ .sequence = vb->sequence,
+ .memory = vb->memory,
+ .m.offset = vb->m.offset,
+ .length = vb->length,
+ .request_fd = vb->request_fd,
+ };
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb32.m.offset = vb->m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb32.m.userptr = (uintptr_t)(vb->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb32.m.fd = vb->m.fd;
+ break;
}
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb32.m.planes = (uintptr_t)vb->m.planes;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+
return 0;
}
-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+#ifdef CONFIG_COMPAT_32BIT_TIME
+static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32_time32 __user *arg)
{
- struct v4l2_plane32 __user *uplane32;
- struct v4l2_plane __user *uplane;
- compat_caddr_t p;
- int num_planes;
- int ret;
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->type, &up->type) ||
- put_user(kp->flags, &up->flags) ||
- put_user(kp->memory, &up->memory))
- return -EFAULT;
-
- if (put_user(kp->bytesused, &up->bytesused) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
- copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
- put_user(kp->sequence, &up->sequence) ||
- put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved))
- return -EFAULT;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
- if (num_planes == 0)
- return 0;
-
- uplane = kp->m.planes;
- if (get_user(p, &up->m.planes))
- return -EFAULT;
- uplane32 = compat_ptr(p);
-
- while (--num_planes >= 0) {
- ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
- if (ret)
- return ret;
- ++uplane;
- ++uplane32;
- }
- } else {
- switch (kp->memory) {
- case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
- return -EFAULT;
- break;
- case V4L2_MEMORY_OVERLAY:
- if (put_user(kp->m.offset, &up->m.offset))
- return -EFAULT;
- break;
- case V4L2_MEMORY_DMABUF:
- if (put_user(kp->m.fd, &up->m.fd))
- return -EFAULT;
- break;
- }
+ struct v4l2_buffer32_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+ vb32 = (struct v4l2_buffer32_time32) {
+ .index = vb->index,
+ .type = vb->type,
+ .bytesused = vb->bytesused,
+ .flags = vb->flags,
+ .field = vb->field,
+ .timestamp.tv_sec = vb->timestamp.tv_sec,
+ .timestamp.tv_usec = vb->timestamp.tv_usec,
+ .timecode = vb->timecode,
+ .sequence = vb->sequence,
+ .memory = vb->memory,
+ .m.offset = vb->m.offset,
+ .length = vb->length,
+ .request_fd = vb->request_fd,
+ };
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb32.m.offset = vb->m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb32.m.userptr = (uintptr_t)(vb->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb32.m.fd = vb->m.fd;
+ break;
}
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb32.m.planes = (uintptr_t)vb->m.planes;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+
return 0;
}
+#endif
struct v4l2_framebuffer32 {
__u32 capability;
__u32 flags;
- compat_caddr_t base;
- struct v4l2_pix_format fmt;
+ compat_caddr_t base;
+ struct {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ __u32 field;
+ __u32 bytesperline;
+ __u32 sizeimage;
+ __u32 colorspace;
+ __u32 priv;
+ } fmt;
};
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
+ struct v4l2_framebuffer32 __user *p32)
{
- u32 tmp;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
- get_user(tmp, &up->base) ||
- get_user(kp->capability, &up->capability) ||
- get_user(kp->flags, &up->flags))
- return -EFAULT;
- kp->base = compat_ptr(tmp);
- get_v4l2_pix_format(&kp->fmt, &up->fmt);
+ if (get_user(p64->capability, &p32->capability) ||
+ get_user(p64->flags, &p32->flags) ||
+ copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt)))
+ return -EFAULT;
+ p64->base = NULL;
+
return 0;
}
-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
+ struct v4l2_framebuffer32 __user *p32)
{
- u32 tmp = (u32)((unsigned long)kp->base);
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
- put_user(tmp, &up->base) ||
- put_user(kp->capability, &up->capability) ||
- put_user(kp->flags, &up->flags))
- return -EFAULT;
- put_v4l2_pix_format(&kp->fmt, &up->fmt);
+ if (put_user((uintptr_t)p64->base, &p32->base) ||
+ put_user(p64->capability, &p32->capability) ||
+ put_user(p64->flags, &p32->flags) ||
+ copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt)))
+ return -EFAULT;
+
return 0;
}
@@ -574,33 +622,39 @@ struct v4l2_input32 {
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
__u32 tuner; /* Associated tuner */
- v4l2_std_id std;
+ compat_u64 std;
__u32 status;
- __u32 reserved[4];
-} __attribute__ ((packed));
+ __u32 capabilities;
+ __u32 reserved[3];
+};
-/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
- Otherwise it is identical to the 32-bit version. */
-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+/*
+ * The 64-bit v4l2_input struct has extra padding at the end of the struct.
+ * Otherwise it is identical to the 32-bit version.
+ */
+static inline int get_v4l2_input32(struct v4l2_input *p64,
+ struct v4l2_input32 __user *p32)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+ if (copy_from_user(p64, p32, sizeof(*p32)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int put_v4l2_input32(struct v4l2_input *p64,
+ struct v4l2_input32 __user *p32)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+ if (copy_to_user(p32, p64, sizeof(*p32)))
return -EFAULT;
return 0;
}
struct v4l2_ext_controls32 {
- __u32 ctrl_class;
- __u32 count;
- __u32 error_idx;
- __u32 reserved[2];
- compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
+ __u32 which;
+ __u32 count;
+ __u32 error_idx;
+ __s32 request_fd;
+ __u32 reserved[1];
+ compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
struct v4l2_ext_control32 {
@@ -614,133 +668,143 @@ struct v4l2_ext_control32 {
};
} __attribute__ ((packed));
-/* The following function really belong in v4l2-common, but that causes
- a circular dependency between modules. We need to think about this, but
- for now this will do. */
-
-/* Return non-zero if this control is a pointer type. Currently only
- type STRING is a pointer type. */
-static inline int ctrl_is_pointer(u32 id)
+/* Return true if this control is a pointer type. */
+static inline bool ctrl_is_pointer(struct file *file, u32 id)
{
- switch (id) {
- case V4L2_CID_RDS_TX_PS_NAME:
- case V4L2_CID_RDS_TX_RADIO_TEXT:
- return 1;
- default:
- return 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct v4l2_ctrl_handler *hdl = NULL;
+ struct v4l2_query_ext_ctrl qec = { id };
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+
+ if (fh->ctrl_handler)
+ hdl = fh->ctrl_handler;
+ else if (vdev->ctrl_handler)
+ hdl = vdev->ctrl_handler;
+
+ if (hdl) {
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
+
+ return ctrl && ctrl->is_ptr;
}
+
+ if (!ops || !ops->vidioc_query_ext_ctrl)
+ return false;
+
+ return !ops->vidioc_query_ext_ctrl(file, NULL, &qec) &&
+ (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
}
-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
+ struct v4l2_ext_controls32 __user *p32)
{
- struct v4l2_ext_control32 __user *ucontrols;
- struct v4l2_ext_control __user *kcontrols;
- int n;
- compat_caddr_t p;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
- get_user(kp->ctrl_class, &up->ctrl_class) ||
- get_user(kp->count, &up->count) ||
- get_user(kp->error_idx, &up->error_idx) ||
- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
- return -EFAULT;
- n = kp->count;
- if (n == 0) {
- kp->controls = NULL;
- return 0;
- }
- if (get_user(p, &up->controls))
- return -EFAULT;
- ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_READ, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ struct v4l2_ext_controls32 ec32;
+
+ if (copy_from_user(&ec32, p32, sizeof(ec32)))
return -EFAULT;
- kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
- kp->controls = kcontrols;
- while (--n >= 0) {
- if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
- return -EFAULT;
- if (ctrl_is_pointer(kcontrols->id)) {
- void __user *s;
-
- if (get_user(p, &ucontrols->string))
- return -EFAULT;
- s = compat_ptr(p);
- if (put_user(s, &kcontrols->string))
- return -EFAULT;
- }
- ucontrols++;
- kcontrols++;
- }
+
+ *p64 = (struct v4l2_ext_controls) {
+ .which = ec32.which,
+ .count = ec32.count,
+ .error_idx = ec32.error_idx,
+ .request_fd = ec32.request_fd,
+ .reserved[0] = ec32.reserved[0],
+ .controls = (void __force *)compat_ptr(ec32.controls),
+ };
+
return 0;
}
-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
+ struct v4l2_ext_controls32 __user *p32)
{
- struct v4l2_ext_control32 __user *ucontrols;
- struct v4l2_ext_control __user *kcontrols = kp->controls;
- int n = kp->count;
- compat_caddr_t p;
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
- put_user(kp->ctrl_class, &up->ctrl_class) ||
- put_user(kp->count, &up->count) ||
- put_user(kp->error_idx, &up->error_idx) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
- return -EFAULT;
- if (!kp->count)
- return 0;
-
- if (get_user(p, &up->controls))
- return -EFAULT;
- ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_WRITE, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ struct v4l2_ext_controls32 ec32;
+
+ memset(&ec32, 0, sizeof(ec32));
+ ec32 = (struct v4l2_ext_controls32) {
+ .which = p64->which,
+ .count = p64->count,
+ .error_idx = p64->error_idx,
+ .request_fd = p64->request_fd,
+ .reserved[0] = p64->reserved[0],
+ .controls = (uintptr_t)p64->controls,
+ };
+
+ if (copy_to_user(p32, &ec32, sizeof(ec32)))
return -EFAULT;
- while (--n >= 0) {
- unsigned size = sizeof(*ucontrols);
-
- /* Do not modify the pointer when copying a pointer control.
- The contents of the pointer was changed, not the pointer
- itself. */
- if (ctrl_is_pointer(kcontrols->id))
- size -= sizeof(ucontrols->value64);
- if (copy_in_user(ucontrols, kcontrols, size))
- return -EFAULT;
- ucontrols++;
- kcontrols++;
- }
return 0;
}
+#ifdef CONFIG_X86_64
+/*
+ * x86 is the only compat architecture with different struct alignment
+ * between 32-bit and 64-bit tasks.
+ */
struct v4l2_event32 {
__u32 type;
union {
+ compat_s64 value64;
__u8 data[64];
} u;
__u32 pending;
__u32 sequence;
- struct compat_timespec timestamp;
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_nsec;
+ } timestamp;
__u32 id;
__u32 reserved[8];
};
-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+static int put_v4l2_event32(struct v4l2_event *p64,
+ struct v4l2_event32 __user *p32)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
- put_user(kp->type, &up->type) ||
- copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
- put_user(kp->pending, &up->pending) ||
- put_user(kp->sequence, &up->sequence) ||
- put_compat_timespec(&kp->timestamp, &up->timestamp) ||
- put_user(kp->id, &up->id) ||
- copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
- return -EFAULT;
+ if (put_user(p64->type, &p32->type) ||
+ copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+ put_user(p64->pending, &p32->pending) ||
+ put_user(p64->sequence, &p32->sequence) ||
+ put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
+ put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
return 0;
}
-struct v4l2_subdev_edid32 {
+#endif
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_event32_time32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+static int put_v4l2_event32_time32(struct v4l2_event *p64,
+ struct v4l2_event32_time32 __user *p32)
+{
+ if (put_user(p64->type, &p32->type) ||
+ copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+ put_user(p64->pending, &p32->pending) ||
+ put_user(p64->sequence, &p32->sequence) ||
+ put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
+ put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
+ return 0;
+}
+#endif
+
+struct v4l2_edid32 {
__u32 pad;
__u32 start_block;
__u32 blocks;
@@ -748,35 +812,34 @@ struct v4l2_subdev_edid32 {
compat_caddr_t edid;
};
-static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid *p64,
+ struct v4l2_edid32 __user *p32)
{
- u32 tmp;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_subdev_edid32)) ||
- get_user(kp->pad, &up->pad) ||
- get_user(kp->start_block, &up->start_block) ||
- get_user(kp->blocks, &up->blocks) ||
- get_user(tmp, &up->edid) ||
- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
- return -EFAULT;
- kp->edid = compat_ptr(tmp);
+ compat_uptr_t edid;
+
+ if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) ||
+ get_user(edid, &p32->edid))
+ return -EFAULT;
+
+ p64->edid = (void __force *)compat_ptr(edid);
return 0;
}
-static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid *p64,
+ struct v4l2_edid32 __user *p32)
{
- u32 tmp = (u32)((unsigned long)kp->edid);
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_subdev_edid32)) ||
- put_user(kp->pad, &up->pad) ||
- put_user(kp->start_block, &up->start_block) ||
- put_user(kp->blocks, &up->blocks) ||
- put_user(tmp, &up->edid) ||
- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
- return -EFAULT;
+ if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid)))
+ return -EFAULT;
return 0;
}
+/*
+ * List of ioctls that require 32-bit/64-bit conversion
+ *
+ * The V4L2 ioctls that aren't listed here don't have pointer arguments
+ * and their struct size is identical for the 32-bit and 64-bit versions,
+ * so they don't need translation.
+ */
#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
@@ -787,9 +850,9 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
-#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
-#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
-#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
+#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
+#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
+#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
@@ -797,207 +860,328 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
-#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
-#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
-#define VIDIOC_STREAMOFF32 _IOW ('V', 19, s32)
-#define VIDIOC_G_INPUT32 _IOR ('V', 38, s32)
-#define VIDIOC_S_INPUT32 _IOWR('V', 39, s32)
-#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
-#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
+#ifdef CONFIG_COMPAT_32BIT_TIME
+#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
+#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
+#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
+#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
+#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
+#endif
-static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
{
- union {
- struct v4l2_format v2f;
- struct v4l2_buffer v2b;
- struct v4l2_framebuffer v2fb;
- struct v4l2_input v2i;
- struct v4l2_standard v2s;
- struct v4l2_ext_controls v2ecs;
- struct v4l2_event v2ev;
- struct v4l2_create_buffers v2crt;
- struct v4l2_subdev_edid v2edid;
- unsigned long vx;
- int vi;
- } karg;
- void __user *up = compat_ptr(arg);
- int compatible_arg = 1;
- long err = 0;
-
- /* First, convert the command. */
switch (cmd) {
- case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
- case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
- case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
- case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
- case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
- case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
- case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
- case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
- case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
- case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
- case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
- case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
- case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
- case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
- case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
- case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
- case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
- case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
- case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
- case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
- case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
- case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
- case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
- case VIDIOC_SUBDEV_G_EDID32: cmd = VIDIOC_SUBDEV_G_EDID; break;
- case VIDIOC_SUBDEV_S_EDID32: cmd = VIDIOC_SUBDEV_S_EDID; break;
+ case VIDIOC_G_FMT32:
+ return VIDIOC_G_FMT;
+ case VIDIOC_S_FMT32:
+ return VIDIOC_S_FMT;
+ case VIDIOC_TRY_FMT32:
+ return VIDIOC_TRY_FMT;
+ case VIDIOC_G_FBUF32:
+ return VIDIOC_G_FBUF;
+ case VIDIOC_S_FBUF32:
+ return VIDIOC_S_FBUF;
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF32_TIME32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF32_TIME32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return VIDIOC_PREPARE_BUF;
+#endif
+ case VIDIOC_QUERYBUF32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_CREATE_BUFS32:
+ return VIDIOC_CREATE_BUFS;
+ case VIDIOC_G_EXT_CTRLS32:
+ return VIDIOC_G_EXT_CTRLS;
+ case VIDIOC_S_EXT_CTRLS32:
+ return VIDIOC_S_EXT_CTRLS;
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return VIDIOC_TRY_EXT_CTRLS;
+ case VIDIOC_PREPARE_BUF32:
+ return VIDIOC_PREPARE_BUF;
+ case VIDIOC_ENUMSTD32:
+ return VIDIOC_ENUMSTD;
+ case VIDIOC_ENUMINPUT32:
+ return VIDIOC_ENUMINPUT;
+ case VIDIOC_G_EDID32:
+ return VIDIOC_G_EDID;
+ case VIDIOC_S_EDID32:
+ return VIDIOC_S_EDID;
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
+ return VIDIOC_DQEVENT;
+#endif
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT32_TIME32:
+ return VIDIOC_DQEVENT;
+#endif
}
+ return cmd;
+}
+int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd)
+{
switch (cmd) {
- case VIDIOC_OVERLAY:
- case VIDIOC_STREAMON:
- case VIDIOC_STREAMOFF:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_OUTPUT:
- err = get_user(karg.vi, (s32 __user *)up);
- compatible_arg = 0;
- break;
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
+ return get_v4l2_format32(parg, arg);
- case VIDIOC_G_INPUT:
- case VIDIOC_G_OUTPUT:
- compatible_arg = 0;
- break;
+ case VIDIOC_S_FBUF32:
+ return get_v4l2_framebuffer32(parg, arg);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return get_v4l2_buffer32_time32(parg, arg);
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32:
+ return get_v4l2_buffer32(parg, arg);
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = get_v4l2_subdev_edid32(&karg.v2edid, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return get_v4l2_ext_controls32(parg, arg);
- case VIDIOC_G_FMT:
- case VIDIOC_S_FMT:
- case VIDIOC_TRY_FMT:
- err = get_v4l2_format32(&karg.v2f, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_CREATE_BUFS32:
+ return get_v4l2_create32(parg, arg);
- case VIDIOC_CREATE_BUFS:
- err = get_v4l2_create32(&karg.v2crt, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_ENUMSTD32:
+ return get_v4l2_standard32(parg, arg);
- case VIDIOC_PREPARE_BUF:
- case VIDIOC_QUERYBUF:
- case VIDIOC_QBUF:
- case VIDIOC_DQBUF:
- err = get_v4l2_buffer32(&karg.v2b, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_ENUMINPUT32:
+ return get_v4l2_input32(parg, arg);
- case VIDIOC_S_FBUF:
- err = get_v4l2_framebuffer32(&karg.v2fb, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_G_EDID32:
+ case VIDIOC_S_EDID32:
+ return get_v4l2_edid32(parg, arg);
+ }
+ return 0;
+}
- case VIDIOC_G_FBUF:
- compatible_arg = 0;
- break;
+int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
+{
+ switch (cmd) {
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
+ return put_v4l2_format32(parg, arg);
- case VIDIOC_ENUMSTD:
- err = get_v4l2_standard32(&karg.v2s, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_G_FBUF32:
+ return put_v4l2_framebuffer32(parg, arg);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return put_v4l2_buffer32_time32(parg, arg);
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32:
+ return put_v4l2_buffer32(parg, arg);
- case VIDIOC_ENUMINPUT:
- err = get_v4l2_input32(&karg.v2i, up);
- compatible_arg = 0;
- break;
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return put_v4l2_ext_controls32(parg, arg);
- case VIDIOC_G_EXT_CTRLS:
- case VIDIOC_S_EXT_CTRLS:
- case VIDIOC_TRY_EXT_CTRLS:
- err = get_v4l2_ext_controls32(&karg.v2ecs, up);
- compatible_arg = 0;
- break;
- case VIDIOC_DQEVENT:
- compatible_arg = 0;
- break;
- }
- if (err)
- return err;
+ case VIDIOC_CREATE_BUFS32:
+ return put_v4l2_create32(parg, arg);
- if (compatible_arg)
- err = native_ioctl(file, cmd, (unsigned long)up);
- else {
- mm_segment_t old_fs = get_fs();
+ case VIDIOC_ENUMSTD32:
+ return put_v4l2_standard32(parg, arg);
- set_fs(KERNEL_DS);
- err = native_ioctl(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
- }
+ case VIDIOC_ENUMINPUT32:
+ return put_v4l2_input32(parg, arg);
- /* Special case: even after an error we need to put the
- results back for these ioctls since the error_idx will
- contain information on which control failed. */
- switch (cmd) {
- case VIDIOC_G_EXT_CTRLS:
- case VIDIOC_S_EXT_CTRLS:
- case VIDIOC_TRY_EXT_CTRLS:
- if (put_v4l2_ext_controls32(&karg.v2ecs, up))
- err = -EFAULT;
- break;
+ case VIDIOC_G_EDID32:
+ case VIDIOC_S_EDID32:
+ return put_v4l2_edid32(parg, arg);
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
+ return put_v4l2_event32(parg, arg);
+#endif
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT32_TIME32:
+ return put_v4l2_event32_time32(parg, arg);
+#endif
}
- if (err)
- return err;
+ return 0;
+}
+
+int v4l2_compat_get_array_args(struct file *file, void *mbuf,
+ void __user *user_ptr, size_t array_size,
+ unsigned int cmd, void *arg)
+{
+ int err = 0;
+
+ memset(mbuf, 0, array_size);
switch (cmd) {
- case VIDIOC_S_INPUT:
- case VIDIOC_S_OUTPUT:
- case VIDIOC_G_INPUT:
- case VIDIOC_G_OUTPUT:
- err = put_user(((s32)karg.vi), (s32 __user *)up);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32: {
+ struct v4l2_buffer *b64 = arg;
+ struct v4l2_plane *p64 = mbuf;
+ struct v4l2_plane32 __user *p32 = user_ptr;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
+ u32 num_planes = b64->length;
+
+ if (num_planes == 0)
+ return 0;
+
+ while (num_planes--) {
+ err = get_v4l2_plane32(p64, p32, b64->memory);
+ if (err)
+ return err;
+ ++p64;
+ ++p32;
+ }
+ }
break;
+ }
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32: {
+ struct v4l2_ext_controls *ecs64 = arg;
+ struct v4l2_ext_control *ec64 = mbuf;
+ struct v4l2_ext_control32 __user *ec32 = user_ptr;
+ int n;
+
+ for (n = 0; n < ecs64->count; n++) {
+ if (copy_from_user(ec64, ec32, sizeof(*ec32)))
+ return -EFAULT;
- case VIDIOC_G_FBUF:
- err = put_v4l2_framebuffer32(&karg.v2fb, up);
- break;
+ if (ctrl_is_pointer(file, ec64->id)) {
+ compat_uptr_t p;
- case VIDIOC_DQEVENT:
- err = put_v4l2_event32(&karg.v2ev, up);
+ if (get_user(p, &ec32->string))
+ return -EFAULT;
+ ec64->string = compat_ptr(p);
+ }
+ ec32++;
+ ec64++;
+ }
break;
-
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = put_v4l2_subdev_edid32(&karg.v2edid, up);
+ }
+ default:
+ if (copy_from_user(mbuf, user_ptr, array_size))
+ err = -EFAULT;
break;
+ }
- case VIDIOC_G_FMT:
- case VIDIOC_S_FMT:
- case VIDIOC_TRY_FMT:
- err = put_v4l2_format32(&karg.v2f, up);
- break;
+ return err;
+}
- case VIDIOC_CREATE_BUFS:
- err = put_v4l2_create32(&karg.v2crt, up);
- break;
+int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr,
+ void *mbuf, size_t array_size,
+ unsigned int cmd, void *arg)
+{
+ int err = 0;
- case VIDIOC_QUERYBUF:
- case VIDIOC_QBUF:
- case VIDIOC_DQBUF:
- err = put_v4l2_buffer32(&karg.v2b, up);
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32: {
+ struct v4l2_buffer *b64 = arg;
+ struct v4l2_plane *p64 = mbuf;
+ struct v4l2_plane32 __user *p32 = user_ptr;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
+ u32 num_planes = b64->length;
+
+ if (num_planes == 0)
+ return 0;
+
+ while (num_planes--) {
+ err = put_v4l2_plane32(p64, p32, b64->memory);
+ if (err)
+ return err;
+ ++p64;
+ ++p32;
+ }
+ }
break;
+ }
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32: {
+ struct v4l2_ext_controls *ecs64 = arg;
+ struct v4l2_ext_control *ec64 = mbuf;
+ struct v4l2_ext_control32 __user *ec32 = user_ptr;
+ int n;
+
+ for (n = 0; n < ecs64->count; n++) {
+ unsigned int size = sizeof(*ec32);
+ /*
+ * Do not modify the pointer when copying a pointer
+			 * control. The contents the pointer refers to were
+			 * changed, not the pointer itself.
+ * The structures are otherwise compatible.
+ */
+ if (ctrl_is_pointer(file, ec64->id))
+ size -= sizeof(ec32->value64);
+
+ if (copy_to_user(ec32, ec64, size))
+ return -EFAULT;
- case VIDIOC_ENUMSTD:
- err = put_v4l2_standard32(&karg.v2s, up);
+ ec32++;
+ ec64++;
+ }
break;
-
- case VIDIOC_ENUMINPUT:
- err = put_v4l2_input32(&karg.v2i, up);
+ }
+ default:
+ if (copy_to_user(user_ptr, mbuf, array_size))
+ err = -EFAULT;
break;
}
+
return err;
}
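
For context, the plane arrays handled above are the ones a 32-bit application hands over through the m.planes pointer of struct v4l2_buffer. A minimal userspace sketch of the call being translated (standard uAPI names, error handling elided):

/* Userspace sketch (as seen from a 32-bit process): queue one
 * multi-planar buffer. Error handling is left out for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int queue_mplane_buffer(int fd, unsigned int index)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct v4l2_buffer buf;

	memset(planes, 0, sizeof(planes));
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.length = 1;			/* number of planes */
	buf.m.planes = planes;		/* the pointer translated above */

	return ioctl(fd, VIDIOC_QBUF, &buf);
}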
+/**
+ * v4l2_compat_ioctl32() - Handles a compat32 ioctl call
+ *
+ * @file: pointer to the &struct file of the opened device node
+ * @cmd: ioctl to be called
+ * @arg: arguments passed from/to the ioctl handler
+ *
+ * This function is meant to be used as the .compat_ioctl fop in v4l2-dev.c
+ * in order to handle 32-bit ioctl calls on a 64-bit kernel.
+ *
+ * Non-private V4L2 ioctls are forwarded to the native 64-bit handler via
+ * file->f_op->unlocked_ioctl(); private ioctls are passed to
+ * vdev->fops->compat_ioctl32 instead.
+ */
long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(file);
@@ -1006,103 +1190,18 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
if (!file->f_op->unlocked_ioctl)
return ret;
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- case VIDIOC_RESERVED:
- case VIDIOC_ENUM_FMT:
- case VIDIOC_G_FMT32:
- case VIDIOC_S_FMT32:
- case VIDIOC_REQBUFS:
- case VIDIOC_QUERYBUF32:
- case VIDIOC_G_FBUF32:
- case VIDIOC_S_FBUF32:
- case VIDIOC_OVERLAY32:
- case VIDIOC_QBUF32:
- case VIDIOC_EXPBUF:
- case VIDIOC_DQBUF32:
- case VIDIOC_STREAMON32:
- case VIDIOC_STREAMOFF32:
- case VIDIOC_G_PARM:
- case VIDIOC_S_PARM:
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_ENUMSTD32:
- case VIDIOC_ENUMINPUT32:
- case VIDIOC_G_CTRL:
- case VIDIOC_S_CTRL:
- case VIDIOC_G_TUNER:
- case VIDIOC_S_TUNER:
- case VIDIOC_G_AUDIO:
- case VIDIOC_S_AUDIO:
- case VIDIOC_QUERYCTRL:
- case VIDIOC_QUERYMENU:
- case VIDIOC_G_INPUT32:
- case VIDIOC_S_INPUT32:
- case VIDIOC_G_OUTPUT32:
- case VIDIOC_S_OUTPUT32:
- case VIDIOC_ENUMOUTPUT:
- case VIDIOC_G_AUDOUT:
- case VIDIOC_S_AUDOUT:
- case VIDIOC_G_MODULATOR:
- case VIDIOC_S_MODULATOR:
- case VIDIOC_S_FREQUENCY:
- case VIDIOC_G_FREQUENCY:
- case VIDIOC_CROPCAP:
- case VIDIOC_G_CROP:
- case VIDIOC_S_CROP:
- case VIDIOC_G_SELECTION:
- case VIDIOC_S_SELECTION:
- case VIDIOC_G_JPEGCOMP:
- case VIDIOC_S_JPEGCOMP:
- case VIDIOC_QUERYSTD:
- case VIDIOC_TRY_FMT32:
- case VIDIOC_ENUMAUDIO:
- case VIDIOC_ENUMAUDOUT:
- case VIDIOC_G_PRIORITY:
- case VIDIOC_S_PRIORITY:
- case VIDIOC_G_SLICED_VBI_CAP:
- case VIDIOC_LOG_STATUS:
- case VIDIOC_G_EXT_CTRLS32:
- case VIDIOC_S_EXT_CTRLS32:
- case VIDIOC_TRY_EXT_CTRLS32:
- case VIDIOC_ENUM_FRAMESIZES:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- case VIDIOC_G_ENC_INDEX:
- case VIDIOC_ENCODER_CMD:
- case VIDIOC_TRY_ENCODER_CMD:
- case VIDIOC_DECODER_CMD:
- case VIDIOC_TRY_DECODER_CMD:
- case VIDIOC_DBG_S_REGISTER:
- case VIDIOC_DBG_G_REGISTER:
- case VIDIOC_S_HW_FREQ_SEEK:
- case VIDIOC_S_DV_TIMINGS:
- case VIDIOC_G_DV_TIMINGS:
- case VIDIOC_DQEVENT:
- case VIDIOC_DQEVENT32:
- case VIDIOC_SUBSCRIBE_EVENT:
- case VIDIOC_UNSUBSCRIBE_EVENT:
- case VIDIOC_CREATE_BUFS32:
- case VIDIOC_PREPARE_BUF32:
- case VIDIOC_ENUM_DV_TIMINGS:
- case VIDIOC_QUERY_DV_TIMINGS:
- case VIDIOC_DV_TIMINGS_CAP:
- case VIDIOC_ENUM_FREQ_BANDS:
- case VIDIOC_SUBDEV_G_EDID32:
- case VIDIOC_SUBDEV_S_EDID32:
- ret = do_video_ioctl(file, cmd, arg);
- break;
+ if (!video_is_registered(vdev))
+ return -ENODEV;
- default:
- if (vdev->fops->compat_ioctl32)
- ret = vdev->fops->compat_ioctl32(file, cmd, arg);
-
- if (ret == -ENOIOCTLCMD)
- printk(KERN_WARNING "compat_ioctl32: "
- "unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
- _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
- cmd);
- break;
- }
+ if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
+ ret = file->f_op->unlocked_ioctl(file, cmd,
+ (unsigned long)compat_ptr(arg));
+ else if (vdev->fops->compat_ioctl32)
+ ret = vdev->fops->compat_ioctl32(file, cmd, arg);
+
+ if (ret == -ENOIOCTLCMD)
+ pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
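
A driver only has to provide its own compat handling for private ioctls (numbers at or above BASE_VIDIOC_PRIVATE); everything else is translated by the core as shown above. A hedged sketch of such a hook, where MYDRV_CMD, my_private_ioctl() and my_fops are made-up names:

/* Hypothetical driver fragment: handle one private ioctl coming from
 * 32-bit userspace. The my_ and MYDRV_ identifiers are placeholders.
 */
#ifdef CONFIG_COMPAT
static long my_compat_ioctl32(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	switch (cmd) {
	case MYDRV_CMD:
		/* the argument is a 32-bit user pointer */
		return my_private_ioctl(file, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32	= my_compat_ioctl32,
#endif
};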
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
new file mode 100644
index 000000000000..0078a04c5445
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework uAPI implementation:
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Internal temporary helper struct, one for each v4l2_ext_control */
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
+ /*
+ * v4l2_ext_control index of the next control belonging to the
+ * same cluster, or 0 if there isn't any.
+ */
+ u32 next;
+};
+
+/*
+ * Helper functions to copy control payload data from kernel space to
+ * user space and vice versa.
+ */
+
+/* Helper function: copy the given control value back to the caller */
+static int ptr_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ u32 len;
+
+ if (ctrl->is_ptr && !ctrl->is_string)
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
+ -EFAULT : 0;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ptr.p_char);
+ if (c->size < len + 1) {
+ c->size = ctrl->elem_size;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ptr.p_char, len + 1) ?
+ -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = *ptr.p_s64;
+ break;
+ default:
+ c->value = *ptr.p_s32;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the current control value back to the caller */
+static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_cur);
+}
+
+/* Helper function: copy the new control value back to the caller */
+static int new_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
+/* Helper function: copy the initial control value back to the caller */
+static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the minimum control value back to the caller */
+static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the maximum control value back to the caller */
+static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ int ret;
+ u32 size;
+
+ ctrl->is_new = 0;
+ if (ctrl->is_dyn_array &&
+ c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) {
+ void *old = ctrl->p_array;
+ void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + c->size;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = c->size / ctrl->elem_size;
+ kvfree(old);
+ }
+
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int elems = c->size / ctrl->elem_size;
+
+ if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
+ return -EFAULT;
+ ctrl->is_new = 1;
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = elems;
+ else if (ctrl->is_array)
+ ctrl->type_ops->init(ctrl, elems, ctrl->p_new);
+ return 0;
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ *ctrl->p_new.p_s64 = c->value64;
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ size = c->size;
+ if (size == 0)
+ return -ERANGE;
+ if (size > ctrl->maximum + 1)
+ size = ctrl->maximum + 1;
+ ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0;
+ if (!ret) {
+ char last = ctrl->p_new.p_char[size - 1];
+
+ ctrl->p_new.p_char[size - 1] = 0;
+ /*
+ * If the string was longer than ctrl->maximum,
+ * then return an error.
+ */
+ if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
+ return -ERANGE;
+ ctrl->is_new = 1;
+ }
+ return ret;
+ default:
+ *ctrl->p_new.p_s32 = c->value;
+ break;
+ }
+ ctrl->is_new = 1;
+ return 0;
+}
+
+/*
+ * VIDIOC_G/TRY/S_EXT_CTRLS implementation
+ */
+
+/*
+ * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
+ *
+ * It is not a fully atomic operation, just best-effort only. After all, if
+ * multiple controls have to be set through multiple i2c writes (for example)
+ * then some initial writes may succeed while others fail. Thus leaving the
+ * system in an inconsistent state. The question is how much effort you are
+ * willing to spend on trying to make something atomic that really isn't.
+ *
+ * From the point of view of an application the main requirement is that
+ * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
+ * error should be returned without actually affecting any controls.
+ *
+ * If all the values are correct, then it is acceptable to just give up
+ * in case of low-level errors.
+ *
+ * It is important though that the application can tell when only a partial
+ * configuration was done. The way we do that is through the error_idx field
+ * of struct v4l2_ext_controls: if that is equal to the count field then no
+ * controls were affected. Otherwise all controls before that index were
+ * successful in performing their 'get' or 'set' operation, the control at
+ * the given index failed, and you don't know what happened with the controls
+ * after the failed one: if they were part of a control cluster they
+ * could have been processed successfully (if a cluster member was encountered
+ * at index < error_idx), they could have failed (if a cluster member was at
+ * error_idx), or they may not have been processed yet (if the first cluster
+ * member appeared after error_idx).
+ *
+ * It is all fairly theoretical, though. In practice all you can do is to
+ * bail out. If error_idx == count, then it is an application bug. If
+ * error_idx < count then it is only an application bug if the error code was
+ * EBUSY. That usually means that something started streaming just when you
+ * tried to set the controls. In all other cases it is a driver/hardware
+ * problem and all you can do is to retry or bail out.
+ *
+ * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
+ * never modifies controls the error_idx is just set to whatever control
+ * has an invalid value.
+ */
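
From the application's point of view the rules above reduce to one check after the ioctl returns; a hedged userspace sketch with arbitrary example controls:

/* Userspace sketch; the control IDs and values are arbitrary examples. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void set_two_controls(int fd)
{
	struct v4l2_ext_control ctrl[2] = {
		{ .id = V4L2_CID_BRIGHTNESS, .value = 128 },
		{ .id = V4L2_CID_CONTRAST,   .value = 32  },
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 2,
		.controls = ctrl,
	};

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
		if (ctrls.error_idx == ctrls.count)
			fprintf(stderr, "validation failed, nothing was changed\n");
		else
			fprintf(stderr, "control %u failed, controls before it were set\n",
				ctrls.error_idx);
	}
}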
+
+/*
+ * Prepare for the extended g/s/try functions.
+ * Find the controls in the control array and do some basic checks.
+ */
+static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool get)
+{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
+ u32 i;
+
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+ u32 id = c->id & V4L2_CTRL_ID_MASK;
+
+ cs->error_idx = i;
+
+ if (cs->which &&
+ (cs->which < V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which > V4L2_CTRL_WHICH_MAX_VAL) &&
+ V4L2_CTRL_ID2WHICH(id) != cs->which) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
+ /*
+ * Old-style private controls are not allowed for
+ * extended controls.
+ */
+ if (id >= V4L2_CID_PRIVATE_BASE) {
+ dprintk(vdev,
+ "old-style private controls not allowed\n");
+ return -EINVAL;
+ }
+ ref = find_ref_lock(hdl, id);
+ if (!ref) {
+ dprintk(vdev, "cannot find control id 0x%x\n", id);
+ return -EINVAL;
+ }
+ h->ref = ref;
+ ctrl = ref->ctrl;
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
+ dprintk(vdev, "control id 0x%x is disabled\n", id);
+ return -EINVAL;
+ }
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) &&
+ (cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL)) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ if (ctrl->is_dyn_array) {
+ unsigned int max_size = ctrl->dims[0] * ctrl->elem_size;
+ unsigned int tot_size = ctrl->elem_size;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ tot_size *= ref->p_req_elems;
+ else
+ tot_size *= ctrl->elems;
+
+ c->size = ctrl->elem_size * (c->size / ctrl->elem_size);
+ if (get) {
+ if (c->size < tot_size) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ c->size = tot_size;
+ } else {
+ if (c->size > max_size) {
+ c->size = max_size;
+ return -ENOSPC;
+ }
+ if (!c->size)
+ return -EFAULT;
+ }
+ } else if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int tot_size = ctrl->elems * ctrl->elem_size;
+
+ if (c->size < tot_size) {
+ /*
+ * In the get case the application first
+ * queries to obtain the size of the control.
+ */
+ if (get) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ dprintk(vdev,
+ "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
+ id, c->size, tot_size);
+ return -EFAULT;
+ }
+ c->size = tot_size;
+ }
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ /*
+ * Initially set next to 0, meaning that there is no other
+ * control in this helper array belonging to the same
+ * cluster.
+ */
+ h->next = 0;
+ }
+
+ /*
+ * We are done if there were no controls that belong to a multi-
+ * control cluster.
+ */
+ if (!have_clusters)
+ return 0;
+
+ /*
+ * The code below figures out in O(n) time which controls in the list
+ * belong to the same cluster.
+ */
+
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(hdl->lock);
+
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = NULL;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /*
+ * If the mref->helper is set, then it points to an earlier
+ * helper that belongs to the same cluster.
+ */
+ if (mref->helper) {
+ /*
+ * Set the next field of mref->helper to the current
+ * index: this means that the earlier helper now
+ * points to the next helper in the same cluster.
+ */
+ mref->helper->next = i;
+ /*
+ * mref should be set only for the first helper in the
+ * cluster, clear the others.
+ */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
+ }
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/*
+ * Handles the corner case where cs->count == 0: it checks whether the
+ * specified control class exists. If the class ID is 0 or one of the
+ * V4L2_CTRL_WHICH_{DEF,MIN,MAX}_VAL values, the check is skipped.
+ */
+static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
+{
+ if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL &&
+ which <= V4L2_CTRL_WHICH_MAX_VAL))
+ return 0;
+ return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
+}
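
Userspace can rely on this corner case to probe for a whole control class without naming a single control; a small sketch (an assumption-laden example, not a reference):

/* Userspace sketch: does the device expose any codec-class controls? */
static int has_codec_controls(int fd)
{
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_CLASS_CODEC,
		.count = 0,
	};

	return ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0;
}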
+
+/*
+ * Get extended controls. Allocates the helpers array if needed.
+ *
+ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
+ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
+ * completed, and in that case p_req_valid is true for all controls.
+ */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ int ret;
+ int i, j;
+ bool is_default, is_request, is_min, is_max;
+
+ is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
+ is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+ is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL);
+ is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL);
+
+ cs->error_idx = cs->count;
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
+ cs->error_idx = cs->count;
+
+ for (i = 0; !ret && i < cs->count; i++)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ ret = -EACCES;
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ bool is_volatile = false;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ master = helpers[i].mref->ctrl;
+ cs->error_idx = i;
+
+ v4l2_ctrl_lock(master);
+
+ /*
+ * g_volatile_ctrl will update the new control values.
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL,
+ * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and
+ * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
+ * it is v4l2_ctrl_request_complete() that copies the
+ * volatile controls at the time of request completion
+ * to the request, so you don't want to do that again.
+ */
+ if (!is_default && !is_request && !is_min && !is_max &&
+ ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ (master->has_volatiles && !is_cur_manual(master)))) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ is_volatile = true;
+ }
+
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ break;
+ }
+
+ /*
+		 * Copy the default value (if is_default is true), the
+		 * request value (if is_request is true and p_req is valid),
+		 * the minimum or maximum value (if is_min or is_max is true),
+		 * the new volatile value (if is_volatile is true) or the
+		 * current value.
+ */
+ do {
+ struct v4l2_ctrl_ref *ref = helpers[idx].ref;
+
+ if (is_default)
+ ret = def_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_request && ref->p_req_array_enomem)
+ ret = -ENOMEM;
+ else if (is_request && ref->p_req_valid)
+ ret = req_to_user(cs->controls + idx, ref);
+ else if (is_min)
+ ret = min_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_max)
+ ret = max_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_volatile)
+ ret = new_to_user(cs->controls + idx, ref->ctrl);
+ else
+ ret = cur_to_user(cs->controls + idx, ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs);
+
+ return v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+}
+EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
+{
+ return ctrl->type_ops->validate(ctrl, p_new);
+}
+
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool set)
+{
+ unsigned int i;
+ int ret = 0;
+
+ cs->error_idx = cs->count;
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
+ union v4l2_ctrl_ptr p_new;
+
+ cs->error_idx = i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ dprintk(vdev,
+ "control id 0x%x is read-only\n",
+ ctrl->id);
+ return -EACCES;
+ }
+ /*
+ * This test is also done in try_set_control_cluster() which
+ * is called in atomic context, so that has the final say,
+ * but it makes sense to do an up-front check as well. Once
+ * an error occurs in try_set_control_cluster() some other
+ * controls may have been set already and we want to do a
+ * best-effort to avoid that.
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
+ dprintk(vdev,
+ "control id 0x%x is grabbed, cannot set\n",
+ ctrl->id);
+ return -EBUSY;
+ }
+ /*
+ * Skip validation for now if the payload needs to be copied
+ * from userspace into kernelspace. We'll validate those later.
+ */
+ if (ctrl->is_ptr)
+ continue;
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ p_new.p_s64 = &cs->controls[i].value64;
+ else
+ p_new.p_s32 = &cs->controls[i].value;
+ ret = validate_new(ctrl, p_new);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Try or try-and-set controls */
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned int i, j;
+ int ret;
+
+ cs->error_idx = cs->count;
+
+ /* Default/minimum/maximum values cannot be changed */
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL) {
+ dprintk(vdev, "%s: cannot change default/min/max value\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl) {
+ dprintk(vdev, "%s: invalid null control handler\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, vdev, set);
+ if (ret && set)
+ cs->error_idx = cs->count;
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ /* Pick an initial non-manual value */
+ s32 new_auto_val = master->manual_mode_value + 1;
+ u32 tmp_idx = idx;
+
+ do {
+ /*
+ * Check if the auto control is part of the
+ * list, and remember the new value.
+ */
+ if (helpers[tmp_idx].ref->ctrl == master)
+ new_auto_val = cs->controls[tmp_idx].value;
+ tmp_idx = helpers[tmp_idx].next;
+ } while (tmp_idx);
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ /*
+ * Copy the new caller-supplied control values.
+ * user_to_new() sets 'is_new' to 1.
+ */
+ do {
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
+
+ ret = user_to_new(cs->controls + idx, ctrl);
+ if (!ret && ctrl->is_ptr) {
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ dprintk(vdev,
+ "failed to validate control %s (%d)\n",
+ v4l2_ctrl_get_name(ctrl->id), ret);
+ }
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set);
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
+}
+EXPORT_SYMBOL(v4l2_try_ext_ctrls);
+
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
+}
+EXPORT_SYMBOL(v4l2_s_ext_ctrls);
+
+/*
+ * VIDIOC_G/S_CTRL implementation
+ */
+
+/* Helper function to get a single control */
+static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret = 0;
+ int i;
+
+ /* Compound controls are not supported. The new_to_user() and
+ * cur_to_user() calls below would need to be modified not to access
+ * userspace memory when called from get_ctrl().
+ */
+ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ return -EACCES;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ if (!ret)
+ ret = new_to_user(c, ctrl);
+ } else {
+ ret = cur_to_user(c, ctrl);
+ }
+ v4l2_ctrl_unlock(master);
+ return ret;
+}
+
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c;
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+ ret = get_ctrl(ctrl, &c);
+
+ if (!ret)
+ control->value = c.value;
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ctrl);
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
+ int i;
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 0;
+
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ return ret;
+
+ /*
+ * For autoclusters with volatiles that are switched from auto to
+ * manual mode we have to update the current volatile values since
+ * those will become the initial manual values after such a switch.
+ */
+ if (master->is_auto && master->has_volatiles && ctrl == master &&
+ !is_cur_manual(master) && ctrl->val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+
+ ctrl->is_new = 1;
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret;
+
+ v4l2_ctrl_lock(ctrl);
+ ret = user_to_new(c, ctrl);
+ if (!ret)
+ ret = set_ctrl(fh, ctrl, 0);
+ if (!ret)
+ ret = cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c = { control->id };
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ c.value = control->value;
+ ret = set_ctrl_lock(fh, ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_s_ctrl);
+
+/*
+ * Helper functions for drivers to get/set controls.
+ */
+
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return 0;
+ c.value = 0;
+ get_ctrl(ctrl, &c);
+ return c.value;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
+
+s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return 0;
+ c.value64 = 0;
+ get_ctrl(ctrl, &c);
+ return c.value64;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return -EINVAL;
+ ctrl->val = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
+
+int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return -EINVAL;
+ *ctrl->p_new.p_s64 = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
+ return -EINVAL;
+ strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+
+int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type, const void *p)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != type))
+ return -EINVAL;
+ /* Setting dynamic arrays is not (yet?) supported. */
+ if (WARN_ON(ctrl->is_dyn_array))
+ return -EINVAL;
+ memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
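
The __v4l2_ctrl_* setters above assume the caller already holds the control handler's lock; drivers normally use the locking wrappers declared in media/v4l2-ctrls.h. A hedged sketch with made-up driver names (my_dev, gain_ctrl, exposure_ctrl), assuming both controls share one handler:

/* Hypothetical driver code; both controls are assumed to share a handler. */
static void my_dev_report_gain(struct my_dev *dev, s32 gain)
{
	/* locking wrapper: takes and releases the handler lock itself */
	v4l2_ctrl_s_ctrl(dev->gain_ctrl, gain);
}

static void my_dev_report_exposure_and_gain(struct my_dev *dev,
					    s64 exposure, s32 gain)
{
	/* update two controls under a single lock acquisition */
	v4l2_ctrl_lock(dev->gain_ctrl);
	__v4l2_ctrl_s_ctrl_int64(dev->exposure_ctrl, exposure);
	__v4l2_ctrl_s_ctrl(dev->gain_ctrl, gain);
	v4l2_ctrl_unlock(dev->gain_ctrl);
}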
+
+/*
+ * Modify the range of a control.
+ */
+int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ bool value_changed;
+ bool range_changed = false;
+ int ret;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->is_array)
+ return -EINVAL;
+ ret = check_range(ctrl->type, min, max, step, def);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ctrl->minimum != min || ctrl->maximum != max ||
+ ctrl->step != step || ctrl->default_value != def) {
+ range_changed = true;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ }
+ cur_to_new(ctrl);
+ if (validate_new(ctrl, ctrl->p_new)) {
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ *ctrl->p_new.p_s64 = def;
+ else
+ *ctrl->p_new.p_s32 = def;
+ }
+
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+ else
+ value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+ if (value_changed)
+ ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ else if (range_changed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ return ret;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
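
A typical caller is a driver reacting to a mode or timing change; it would normally go through the v4l2_ctrl_modify_range() wrapper, which takes the handler lock around the helper above. Hypothetical names again:

/* Hypothetical: shrink the gain range when entering a low-noise mode. */
static int my_dev_enter_low_noise_mode(struct my_dev *dev)
{
	/* arguments: min, max, step, default */
	return v4l2_ctrl_modify_range(dev->gain_ctrl, 0, 64, 1, 16);
}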
+
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ unsigned int elems = 1;
+ unsigned int i;
+ void *p_array;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (!ctrl->is_array || ctrl->is_dyn_array)
+ return -EINVAL;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ elems *= dims[i];
+ if (elems == 0)
+ return -EINVAL;
+ p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL);
+ if (!p_array)
+ return -ENOMEM;
+ kvfree(ctrl->p_array);
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->elems = elems;
+ ctrl->new_elems = elems;
+ ctrl->p_array = p_array;
+ ctrl->p_new.p = p_array;
+ ctrl->p_cur.p = p_array + elems * ctrl->elem_size;
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ ctrl->dims[i] = dims[i];
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
+ cur_to_new(ctrl);
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE |
+ V4L2_EVENT_CTRL_CH_DIMENSIONS);
+ return 0;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions);
+
+/* Implement VIDIOC_QUERY_EXT_CTRL */
+int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
+{
+ const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ u32 id = qc->id & V4L2_CTRL_ID_MASK;
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+
+ if (!hdl)
+ return -EINVAL;
+
+ mutex_lock(hdl->lock);
+
+ /* Try to find it */
+ ref = find_ref(hdl, id);
+
+ if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
+ bool is_compound;
+ /* Match any control that is not hidden */
+ unsigned int mask = 1;
+ bool match = false;
+
+ if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
+ /* Match any hidden control */
+ match = true;
+ } else if ((qc->id & next_flags) == next_flags) {
+ /* Match any control, compound or not */
+ mask = 0;
+ }
+
+ /* Find the next control with ID > qc->id */
+
+ /* Did we reach the end of the control list? */
+ if (id >= node2id(hdl->ctrl_refs.prev)) {
+ ref = NULL; /* Yes, so there is no next control */
+ } else if (ref) {
+ struct v4l2_ctrl_ref *pos = ref;
+
+ /*
+ * We found a control with the given ID, so just get
+ * the next valid one in the list.
+ */
+ ref = NULL;
+ list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) {
+ is_compound = pos->ctrl->is_array ||
+ pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < pos->ctrl->id &&
+ (is_compound & mask) == match) {
+ ref = pos;
+ break;
+ }
+ }
+ } else {
+ struct v4l2_ctrl_ref *pos;
+
+ /*
+ * No control with the given ID exists, so start
+ * searching for the next largest ID. We know there
+ * is one, otherwise the first 'if' above would have
+ * been true.
+ */
+ list_for_each_entry(pos, &hdl->ctrl_refs, node) {
+ is_compound = pos->ctrl->is_array ||
+ pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < pos->ctrl->id &&
+ (is_compound & mask) == match) {
+ ref = pos;
+ break;
+ }
+ }
+ }
+ }
+ mutex_unlock(hdl->lock);
+
+ if (!ref)
+ return -EINVAL;
+
+ ctrl = ref->ctrl;
+ memset(qc, 0, sizeof(*qc));
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
+ strscpy(qc->name, ctrl->name, sizeof(qc->name));
+ qc->flags = user_flags(ctrl);
+ qc->type = ctrl->type;
+ qc->elem_size = ctrl->elem_size;
+ qc->elems = ctrl->elems;
+ qc->nr_of_dims = ctrl->nr_of_dims;
+ memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
+ qc->minimum = ctrl->minimum;
+ qc->maximum = ctrl->maximum;
+ qc->default_value = ctrl->default_value;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+ ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qc->step = 1;
+ else
+ qc->step = ctrl->step;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl);
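
The NEXT_CTRL/NEXT_COMPOUND handling above is what drives the usual userspace enumeration loop; a hedged sketch:

/* Userspace sketch: list every control, compound controls included. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void list_controls(int fd)
{
	struct v4l2_query_ext_ctrl qec;

	memset(&qec, 0, sizeof(qec));
	qec.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	while (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qec) == 0) {
		if (!(qec.flags & V4L2_CTRL_FLAG_DISABLED))
			printf("0x%08x %s\n", qec.id, qec.name);
		qec.id |= V4L2_CTRL_FLAG_NEXT_CTRL |
			  V4L2_CTRL_FLAG_NEXT_COMPOUND;
	}
}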
+
+void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to,
+ const struct v4l2_query_ext_ctrl *from)
+{
+ to->id = from->id;
+ to->type = from->type;
+ to->flags = from->flags;
+ strscpy(to->name, from->name, sizeof(to->name));
+
+ switch (from->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_STRING:
+ case V4L2_CTRL_TYPE_BITMASK:
+ to->minimum = from->minimum;
+ to->maximum = from->maximum;
+ to->step = from->step;
+ to->default_value = from->default_value;
+ break;
+ default:
+ to->minimum = 0;
+ to->maximum = 0;
+ to->step = 0;
+ to->default_value = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYMENU */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 i = qm->index;
+
+ ctrl = v4l2_ctrl_find(hdl, qm->id);
+ if (!ctrl)
+ return -EINVAL;
+
+ qm->reserved = 0;
+ /* Sanity checks */
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (!ctrl->qmenu)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (!ctrl->qmenu_int)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
+ /* Use mask to see if this menu item should be skipped */
+ if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i)))
+ return -EINVAL;
+ /* Empty menu items should also be skipped */
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_querymenu);
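
Since skipped or empty menu items simply return -EINVAL, userspace iterates from the control's minimum to its maximum and ignores the failures; a sketch for a standard (non-integer) menu, reusing the includes from the previous example:

/* Userspace sketch: print the valid items of one menu control. */
static void list_menu_items(int fd, __u32 ctrl_id, __s32 min, __s32 max)
{
	struct v4l2_querymenu qm;
	__s32 i;

	for (i = min; i <= max; i++) {
		memset(&qm, 0, sizeof(qm));
		qm.id = ctrl_id;
		qm.index = i;
		if (ioctl(fd, VIDIOC_QUERYMENU, &qm) == 0)
			printf("  %d: %s\n", i, (const char *)qm.name);
	}
}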
+
+/*
+ * VIDIOC_LOG_STATUS helpers
+ */
+
+int v4l2_ctrl_log_status(struct file *file, void *priv)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->v4l2_dev) {
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
+ vfd->v4l2_dev->name);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_log_status);
+
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
+/*
+ * VIDIOC_(UN)SUBSCRIBE_EVENT implementation
+ */
+
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev,
+ unsigned int elems)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return -EINVAL;
+
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL))
+ send_initial_event(sev->fh, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return 0;
+}
+
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
+
+int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_CTRL)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
+/*
+ * poll helper
+ */
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ return EPOLLPRI;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_poll);
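
Taken together, the event, poll and log-status helpers in this file are typically wired into a driver roughly like this (hedged sketch; the my_ names are placeholders and only the control-related hooks are shown):

/* Hypothetical driver glue; unrelated hooks are omitted. */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_log_status	  = v4l2_ctrl_log_status,
	.vidioc_subscribe_event	  = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	/* ... format and buffer ioctls ... */
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.poll		= v4l2_ctrl_poll,
	.unlocked_ioctl	= video_ioctl2,
	/* ... open, release, mmap ... */
};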
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
new file mode 100644
index 000000000000..209bc05883bb
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -0,0 +1,2802 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework core implementation.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#include "v4l2-ctrls-priv.h"
+
+static const union v4l2_ctrl_ptr ptr_null;
+
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl,
+ u32 changes)
+{
+ memset(ev, 0, sizeof(*ev));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = user_flags(ctrl);
+ if (ctrl->is_ptr)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ v4l2_event_queue_fh(fh, &ev);
+}
+
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
+{
+ unsigned int i;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ return false;
+ case V4L2_CTRL_TYPE_STRING:
+ for (i = 0; i < ctrl->elems; i++) {
+ unsigned int idx = i * ctrl->elem_size;
+
+ /* strings are always 0-terminated */
+ if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx))
+ return false;
+ }
+ return true;
+ default:
+ return !memcmp(ptr1.p_const, ptr2.p_const,
+ ctrl->elems * ctrl->elem_size);
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_equal);
+
+/* Default intra MPEG-2 quantisation coefficients, from the specification. */
+static const u8 mpeg2_intra_quant_matrix[64] = {
+ 8, 16, 16, 19, 16, 19, 22, 22,
+ 22, 22, 22, 22, 26, 24, 26, 27,
+ 27, 27, 26, 26, 26, 26, 27, 27,
+ 27, 29, 29, 29, 34, 34, 34, 29,
+ 29, 29, 27, 27, 29, 29, 32, 32,
+ 34, 34, 37, 38, 37, 35, 35, 34,
+ 35, 38, 38, 40, 40, 40, 48, 48,
+ 46, 46, 56, 56, 58, 69, 69, 83
+};
+
+static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ /* 4:2:0 */
+ p_mpeg2_sequence->chroma_format = 1;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ /* interlaced top field */
+ p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD;
+ p_mpeg2_picture->picture_coding_type =
+ V4L2_MPEG2_PIC_CODING_TYPE_I;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ p_mpeg2_quant = p;
+
+ memcpy(p_mpeg2_quant->intra_quantiser_matrix,
+ mpeg2_intra_quant_matrix,
+ ARRAY_SIZE(mpeg2_intra_quant_matrix));
+ /*
+ * The default non-intra MPEG-2 quantisation
+ * coefficients are all 16, as per the specification.
+ */
+ memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16,
+ sizeof(p_mpeg2_quant->non_intra_quantiser_matrix));
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+ p_vp8_frame->num_dct_parts = 1;
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ p_vp9_frame = p;
+ p_vp9_frame->profile = 0;
+ p_vp9_frame->bit_depth = 8;
+ p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ p_av1_sequence = p;
+ /*
+ * The initial profile is 0 which only allows YUV 420 subsampled
+ * data. Set the subsampling flags accordingly.
+ */
+ p_av1_sequence->bit_depth = 8;
+ p_av1_sequence->flags |= V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y;
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ p_fwht_params->version = V4L2_FWHT_VERSION;
+ p_fwht_params->width = 1280;
+ p_fwht_params->height = 720;
+ p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
+ (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ p_h264_scaling_matrix = p;
+ /*
+		 * The default (flat) H.264 scaling matrix used when none is
+		 * specified in the bitstream; this follows formulas (7-8)
+		 * and (7-9) of the specification.
+ */
+ memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix));
+ break;
+ }
+}
+
+static void std_min_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_min.p_const)
+ memcpy(p, ctrl->p_min.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void std_max_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_max.p_const)
+ memcpy(p, ctrl->p_max.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void __v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ u32 which, union v4l2_ctrl_ptr ptr)
+{
+ unsigned int i;
+ u32 tot_elems = ctrl->elems;
+ u32 elems = tot_elems - from_idx;
+ s64 value;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ value = ctrl->default_value;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ value = ctrl->maximum;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ value = ctrl->minimum;
+ break;
+ default:
+ return;
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ if (which == V4L2_CTRL_WHICH_DEF_VAL)
+ value = ctrl->minimum;
+
+ for (i = from_idx; i < tot_elems; i++) {
+ unsigned int offset = i * ctrl->elem_size;
+
+ memset(ptr.p_char + offset, ' ', value);
+ ptr.p_char[offset + value] = '\0';
+ }
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s64[i] = value;
+ } else {
+ memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
+ }
+ break;
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s32[i] = value;
+ } else {
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
+ }
+ break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ memset(ptr.p_u8 + from_idx, value, elems);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ if (value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u16[i] = value;
+ } else {
+ memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
+ }
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ if (value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u32[i] = value;
+ } else {
+ memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
+ }
+ break;
+ default:
+ for (i = from_idx; i < tot_elems; i++) {
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ std_init_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ std_max_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ std_min_compound(ctrl, i, ptr);
+ break;
+ }
+ }
+ break;
+ }
+}
+
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_DEF_VAL, ptr);
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
+
+static void v4l2_ctrl_type_op_minimum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MIN_VAL, ptr);
+}
+
+static void v4l2_ctrl_type_op_maximum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MAX_VAL, ptr);
+}
+
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
+{
+ union v4l2_ctrl_ptr ptr = ctrl->p_cur;
+
+ if (ctrl->is_array) {
+ unsigned i;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ pr_cont("[%u]", ctrl->dims[i]);
+ pr_cont(" ");
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ pr_cont("%d", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ pr_cont("%s", *ptr.p_s32 ? "true" : "false");
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ pr_cont("0x%08x", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ pr_cont("%lld", *ptr.p_s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ pr_cont("%s", ptr.p_char);
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ pr_cont("%u", (unsigned)*ptr.p_u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ pr_cont("%u", (unsigned)*ptr.p_u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ pr_cont("%u", (unsigned)*ptr.p_u32);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ pr_cont("H264_SPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ pr_cont("H264_PPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ pr_cont("H264_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ pr_cont("H264_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ pr_cont("H264_DECODE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ pr_cont("H264_PRED_WEIGHTS");
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ pr_cont("FWHT_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ pr_cont("VP8_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ pr_cont("HDR10_CLL_INFO");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ pr_cont("HDR10_MASTERING_DISPLAY");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ pr_cont("MPEG2_QUANTISATION");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ pr_cont("MPEG2_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ pr_cont("MPEG2_PICTURE");
+ break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ pr_cont("VP9_COMPRESSED_HDR");
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ pr_cont("VP9_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ pr_cont("HEVC_SPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ pr_cont("HEVC_PPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ pr_cont("HEVC_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ pr_cont("HEVC_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ pr_cont("HEVC_DECODE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ pr_cont("AV1_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ pr_cont("AV1_TILE_GROUP_ENTRY");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ pr_cont("AV1_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ pr_cont("AV1_FILM_GRAIN");
+ break;
+ case V4L2_CTRL_TYPE_RECT:
+ pr_cont("(%d,%d)/%ux%u",
+ ptr.p_rect->left, ptr.p_rect->top,
+ ptr.p_rect->width, ptr.p_rect->height);
+ break;
+ default:
+ pr_cont("unknown type %d", ctrl->type);
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_log);
+
+/*
+ * Round towards the closest legal value. Be careful when we are
+ * close to the maximum range of the control type to prevent
+ * wrap-arounds.
+ */
+#define ROUND_TO_RANGE(val, offset_type, ctrl) \
+({ \
+ offset_type offset; \
+ if ((ctrl)->maximum >= 0 && \
+ val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
+ val = (ctrl)->maximum; \
+ else \
+ val += (s32)((ctrl)->step / 2); \
+ val = clamp_t(typeof(val), val, \
+ (ctrl)->minimum, (ctrl)->maximum); \
+ offset = (val) - (ctrl)->minimum; \
+ offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
+ val = (ctrl)->minimum + offset; \
+ 0; \
+})
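+
+/*
+ * Worked example of the rounding above: with minimum = 0, maximum = 255 and
+ * step = 4, a new value of 9 first gets step/2 = 2 added (11), is clamped to
+ * the range (still 11), and is then snapped to a multiple of the step above
+ * the minimum: offset = 11, 4 * (11 / 4) = 8, so the stored value becomes 8.
+ * A value of 10 rounds up to 12 by the same computation.
+ */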
+
+/* Validate a new control */
+
+#define zero_padding(s) \
+ memset(&(s).padding, 0, sizeof((s).padding))
+#define zero_reserved(s) \
+ memset(&(s).reserved, 0, sizeof((s).reserved))
+
+static int
+validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf)
+{
+ unsigned int i;
+
+ if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED |
+ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE))
+ return -EINVAL;
+
+	/* Check that all values are in the accepted range. */
+ if (lf->level > GENMASK(5, 0))
+ return -EINVAL;
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++)
+ if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++)
+ if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+
+ zero_reserved(*lf);
+ return 0;
+}
+
+static int
+validate_vp9_quant_params(struct v4l2_vp9_quantization *quant)
+{
+ if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 ||
+ quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 ||
+ quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15)
+ return -EINVAL;
+
+ zero_reserved(*quant);
+ return 0;
+}
+
+static int
+validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg)
+{
+ unsigned int i, j;
+
+ if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP |
+ V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA |
+ V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) {
+ if (seg->feature_enabled[i] &
+ ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) {
+ static const int range[] = { 255, 63, 3, 0 };
+
+ for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) {
+ if (seg->feature_data[i][j] < -range[j] ||
+ seg->feature_data[i][j] > range[j])
+ return -EINVAL;
+ }
+ }
+
+ zero_reserved(*seg);
+ return 0;
+}
+
+static int
+validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr)
+{
+ if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
+{
+ int ret;
+
+ /* Make sure we're not passed invalid flags. */
+ if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_SHOW_FRAME |
+ V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY |
+ V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV |
+ V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX |
+ V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE |
+ V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING))
+ return -EINVAL;
+
+ if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT &&
+ frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX)
+ return -EINVAL;
+
+ if (frame->profile > V4L2_VP9_PROFILE_MAX)
+ return -EINVAL;
+
+ if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL)
+ return -EINVAL;
+
+ if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX)
+ return -EINVAL;
+
+ /*
+ * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10
+ * and 12 bit depths.
+ */
+ if ((frame->profile < 2 && frame->bit_depth != 8) ||
+ (frame->profile >= 2 &&
+ (frame->bit_depth != 10 && frame->bit_depth != 12)))
+ return -EINVAL;
+
+ /* Profile 0 and 2 only accept YUV 4:2:0. */
+ if ((frame->profile == 0 || frame->profile == 2) &&
+ (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) ||
+ !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */
+ if ((frame->profile == 1 || frame->profile == 3) &&
+ ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) &&
+ (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ return -EINVAL;
+
+ /*
+ * According to the spec, tile_cols_log2 shall be less than or equal
+ * to 6.
+ */
+ if (frame->tile_cols_log2 > 6)
+ return -EINVAL;
+
+ if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT)
+ return -EINVAL;
+
+ ret = validate_vp9_lf_params(&frame->lf);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_quant_params(&frame->quant);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_seg_params(&frame->seg);
+ if (ret)
+ return ret;
+
+ zero_reserved(*frame);
+ return 0;
+}
+
+static int validate_av1_quantization(struct v4l2_av1_quantization *q)
+{
+ if (q->flags > GENMASK(2, 0))
+ return -EINVAL;
+
+ if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 ||
+ q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 ||
+ q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 ||
+ q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 ||
+ q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 ||
+ q->delta_q_res > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (q->qm_y > GENMASK(3, 0) ||
+ q->qm_u > GENMASK(3, 0) ||
+ q->qm_v > GENMASK(3, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_segmentation(struct v4l2_av1_segmentation *s)
+{
+ u32 i;
+ u32 j;
+
+ if (s->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) {
+ static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 };
+ static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0};
+
+ for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); j++) {
+ s32 limit = segmentation_feature_max[j];
+
+ if (segmentation_feature_signed[j]) {
+ if (s->feature_data[i][j] < -limit ||
+ s->feature_data[i][j] > limit)
+ return -EINVAL;
+ } else {
+ if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit)
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf)
+{
+ u32 i;
+
+ if (lf->flags > GENMASK(3, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->level); i++) {
+ if (lf->level[i] > GENMASK(5, 0))
+ return -EINVAL;
+ }
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
+ if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
+ if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_cdef(struct v4l2_av1_cdef *cdef)
+{
+ u32 i;
+
+ if (cdef->damping_minus_3 > GENMASK(1, 0) ||
+ cdef->bits > GENMASK(1, 0))
+ return -EINVAL;
+
+ for (i = 0; i < 1 << cdef->bits; i++) {
+ if (cdef->y_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->y_sec_strength[i] > 4 ||
+ cdef->uv_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->uv_sec_strength[i] > 4)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_restauration(struct v4l2_av1_loop_restoration *lr)
+{
+ if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg)
+{
+ u32 i;
+
+ if (fg->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ if (fg->film_grain_params_ref_idx > GENMASK(2, 0) ||
+ fg->num_y_points > 14 ||
+ fg->num_cb_points > 10 ||
+ fg->num_cr_points > GENMASK(3, 0) ||
+ fg->grain_scaling_minus_8 > GENMASK(1, 0) ||
+ fg->ar_coeff_lag > GENMASK(1, 0) ||
+ fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) ||
+ fg->grain_scale_shift > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN))
+ return 0;
+
+ for (i = 1; i < fg->num_y_points; i++)
+ if (fg->point_y_value[i] <= fg->point_y_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cb_points; i++)
+ if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cr_points; i++)
+ if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1])
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f)
+{
+ int ret = 0;
+
+ ret = validate_av1_quantization(&f->quantization);
+ if (ret)
+ return ret;
+ ret = validate_av1_segmentation(&f->segmentation);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_filter(&f->loop_filter);
+ if (ret)
+ return ret;
+ ret = validate_av1_cdef(&f->cdef);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_restauration(&f->loop_restoration);
+ if (ret)
+ return ret;
+
+ if (f->flags &
+ ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME |
+ V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME |
+ V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE |
+ V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE |
+ V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS |
+ V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV |
+ V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC |
+ V4L2_AV1_FRAME_FLAG_USE_SUPERRES |
+ V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV |
+ V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE |
+ V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS |
+ V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF |
+ V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION |
+ V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT |
+ V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE |
+ V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING))
+ return -EINVAL;
+
+ if (f->superres_denom > GENMASK(2, 0) + 9)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * validate_av1_sequence - validate AV1 sequence header fields
+ * @s: control struct from userspace
+ *
+ * Implements AV1 spec §5.5.2 color_config() checks that are
+ * possible with the current v4l2_ctrl_av1_sequence definition.
+ *
+ * TODO: extend validation once additional fields such as
+ * color_primaries, transfer_characteristics,
+ * matrix_coefficients, and chroma_sample_position
+ * are added to the uAPI.
+ *
+ * Return: 0 if valid, -EINVAL otherwise.
+ */
+static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s)
+{
+ const bool mono = s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME;
+ const bool sx = s->flags & V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X;
+ const bool sy = s->flags & V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y;
+ const bool uv_dq = s->flags & V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q;
+
+ /* 1. Reject unknown flags */
+ if (s->flags &
+ ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE |
+ V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION |
+ V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME |
+ V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y |
+ V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT |
+ V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q))
+ return -EINVAL;
+
+ /* 2. Profile range */
+ if (s->seq_profile > 2)
+ return -EINVAL;
+
+ /* 3. Monochrome shortcut */
+ if (mono) {
+ /* Profile 1 forbids monochrome */
+ if (s->seq_profile == 1)
+ return -EINVAL;
+
+		/* Mono → subsampling must look like 4:0:0 (sx=1, sy=1) */
+ if (!sx || !sy)
+ return -EINVAL;
+
+ /* separate_uv_delta_q must be 0 */
+ if (uv_dq)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* 4. Profile-specific rules */
+ switch (s->seq_profile) {
+ case 0:
+ /* Profile 0: only 8/10-bit, subsampling=4:2:0 (sx=1, sy=1) */
+ if (s->bit_depth != 8 && s->bit_depth != 10)
+ return -EINVAL;
+ if (!(sx && sy))
+ return -EINVAL;
+ break;
+
+ case 1:
+ /* Profile 1: only 8/10-bit, subsampling=4:4:4 (sx=0, sy=0) */
+ if (s->bit_depth != 8 && s->bit_depth != 10)
+ return -EINVAL;
+ if (sx || sy)
+ return -EINVAL;
+ break;
+
+ case 2:
+ /* Profile 2: 8/10/12-bit allowed */
+ if (s->bit_depth != 8 && s->bit_depth != 10 &&
+ s->bit_depth != 12)
+ return -EINVAL;
+
+ if (s->bit_depth == 12) {
+ if (!sx) {
+ /* 4:4:4 → sy must be 0 */
+ if (sy)
+ return -EINVAL;
+ } else {
+ /* sx=1 → sy=0 (4:2:2) or sy=1 (4:2:0) */
+ if (sy != 0 && sy != 1)
+ return -EINVAL;
+ }
+ } else {
+ /* 8/10-bit → only 4:2:2 allowed (sx=1, sy=0) */
+ if (!(sx && !sy))
+ return -EINVAL;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Compound controls validation requires setting unused fields/flags to zero
+ * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's
+ * memcmp.
+ */
+static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_sps *p_h264_sps;
+ struct v4l2_ctrl_h264_pps *p_h264_pps;
+ struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
+ struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
+ struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
+ struct v4l2_area *area;
+ struct v4l2_rect *rect;
+ void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ switch (p_mpeg2_sequence->chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ switch (p_mpeg2_picture->intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_structure) {
+ case V4L2_MPEG2_PIC_TOP_FIELD:
+ case V4L2_MPEG2_PIC_BOTTOM_FIELD:
+ case V4L2_MPEG2_PIC_FRAME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_I:
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_reserved(*p_mpeg2_picture);
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ break;
+
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ if (p_fwht_params->version < V4L2_FWHT_VERSION)
+ return -EINVAL;
+ if (!p_fwht_params->width || !p_fwht_params->height)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SPS:
+ p_h264_sps = p;
+
+ /* Some syntax elements are only conditionally valid */
+ if (p_h264_sps->pic_order_cnt_type != 0) {
+ p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0;
+ } else if (p_h264_sps->pic_order_cnt_type != 1) {
+ p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0;
+ p_h264_sps->offset_for_non_ref_pic = 0;
+ p_h264_sps->offset_for_top_to_bottom_field = 0;
+ memset(&p_h264_sps->offset_for_ref_frame, 0,
+ sizeof(p_h264_sps->offset_for_ref_frame));
+ }
+
+ if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) {
+ p_h264_sps->chroma_format_idc = 1;
+ p_h264_sps->bit_depth_luma_minus8 = 0;
+ p_h264_sps->bit_depth_chroma_minus8 = 0;
+
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
+ }
+
+ if (p_h264_sps->chroma_format_idc < 3)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
+
+ if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
+
+ /*
+		 * Chroma 4:2:2 format requires at least the High 4:2:2 profile.
+ *
+ * The H264 specification and well-known parser implementations
+ * use profile-idc values directly, as that is clearer and
+ * less ambiguous. We do the same here.
+ */
+ if (p_h264_sps->profile_idc < 122 &&
+ p_h264_sps->chroma_format_idc > 1)
+ return -EINVAL;
+		/* Chroma 4:4:4 format requires at least the High 4:4:4 profile */
+ if (p_h264_sps->profile_idc < 244 &&
+ p_h264_sps->chroma_format_idc > 2)
+ return -EINVAL;
+ if (p_h264_sps->chroma_format_idc > 3)
+ return -EINVAL;
+
+ if (p_h264_sps->bit_depth_luma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->bit_depth_chroma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_frame_num_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->pic_order_cnt_type > 2)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PPS:
+ p_h264_pps = p;
+
+ if (p_h264_pps->num_slice_groups_minus1 > 7)
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l0_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l1_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->weighted_bipred_idc > 2)
+ return -EINVAL;
+ /*
+ * pic_init_qp_minus26 shall be in the range of
+ * -(26 + QpBdOffset_y) to +25, inclusive,
+ * where QpBdOffset_y is 6 * bit_depth_luma_minus8
+ */
+ if (p_h264_pps->pic_init_qp_minus26 < -62 ||
+ p_h264_pps->pic_init_qp_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->pic_init_qs_minus26 < -26 ||
+ p_h264_pps->pic_init_qs_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->chroma_qp_index_offset < -12 ||
+ p_h264_pps->chroma_qp_index_offset > 12)
+ return -EINVAL;
+ if (p_h264_pps->second_chroma_qp_index_offset < -12 ||
+ p_h264_pps->second_chroma_qp_index_offset > 12)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ p_h264_pred_weights = p;
+
+ if (p_h264_pred_weights->luma_log2_weight_denom > 7)
+ return -EINVAL;
+ if (p_h264_pred_weights->chroma_log2_weight_denom > 7)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ p_h264_slice_params = p;
+
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->flags &=
+ ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
+
+ if (p_h264_slice_params->colour_plane_id > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->cabac_init_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->disable_deblocking_filter_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 ||
+ p_h264_slice_params->slice_alpha_c0_offset_div2 > 6)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_beta_offset_div2 < -6 ||
+ p_h264_slice_params->slice_beta_offset_div2 > 6)
+ return -EINVAL;
+
+ if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I ||
+ p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI)
+ p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0;
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0;
+
+ if (p_h264_slice_params->num_ref_idx_l0_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_slice_params->num_ref_idx_l1_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ zero_reserved(*p_h264_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ p_h264_dec_params = p;
+
+ if (p_h264_dec_params->nal_ref_idc > 3)
+ return -EINVAL;
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ struct v4l2_h264_dpb_entry *dpb_entry =
+ &p_h264_dec_params->dpb[i];
+
+ zero_reserved(*dpb_entry);
+ }
+ zero_reserved(*p_h264_dec_params);
+ break;
+
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+
+ switch (p_vp8_frame->num_dct_parts) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_padding(p_vp8_frame->segment);
+ zero_padding(p_vp8_frame->lf);
+ zero_padding(p_vp8_frame->quant);
+ zero_padding(p_vp8_frame->entropy);
+ zero_padding(p_vp8_frame->coder_state);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ p_hevc_decode_params = p;
+
+ if (p_hevc_decode_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ p_hdr10_mastering = p;
+
+ for (i = 0; i < 3; ++i) {
+ if (p_hdr10_mastering->display_primaries_x[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
+ p_hdr10_mastering->display_primaries_x[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
+ p_hdr10_mastering->display_primaries_y[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
+ p_hdr10_mastering->display_primaries_y[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
+ return -EINVAL;
+ }
+
+ if (p_hdr10_mastering->white_point_x <
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW ||
+ p_hdr10_mastering->white_point_x >
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH ||
+ p_hdr10_mastering->white_point_y <
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW ||
+ p_hdr10_mastering->white_point_y >
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH)
+ return -EINVAL;
+
+ if (p_hdr10_mastering->max_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW ||
+ p_hdr10_mastering->max_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MAX_LUMA_HIGH ||
+ p_hdr10_mastering->min_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MIN_LUMA_LOW ||
+ p_hdr10_mastering->min_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ /* The following restriction comes from ITU-T Rec. H.265 spec */
+ if (p_hdr10_mastering->max_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW &&
+ p_hdr10_mastering->min_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ break;
+
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ return validate_vp9_compressed_hdr(p);
+
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ return validate_vp9_frame(p);
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ return validate_av1_frame(p);
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ return validate_av1_sequence(p);
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ return validate_av1_film_grain(p);
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_RECT:
+ rect = p;
+ if (!rect->width || !rect->height)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ size_t len;
+ u64 offset;
+ s64 val;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ /*
+		 * We can't use the ROUND_TO_RANGE macro here because the
+		 * 64-bit division needs special care (do_div()).
+ */
+ val = ptr.p_s64[idx];
+ if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
+ val = ctrl->maximum;
+ else
+ val += (s64)(ctrl->step / 2);
+ val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
+ offset = val - ctrl->minimum;
+ do_div(offset, ctrl->step);
+ ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
+ return 0;
+ case V4L2_CTRL_TYPE_U8:
+ return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
+ case V4L2_CTRL_TYPE_U16:
+ return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
+ case V4L2_CTRL_TYPE_U32:
+ return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = !!ptr.p_s32[idx];
+ return 0;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
+ return -ERANGE;
+ if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
+ (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BITMASK:
+ ptr.p_s32[idx] &= ctrl->maximum;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ return 0;
+
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ len = strlen(ptr.p_char + idx);
+ if (len < ctrl->minimum)
+ return -ERANGE;
+ if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
+ return -ERANGE;
+ return 0;
+
+ default:
+ return std_validate_compound(ctrl, idx, ptr);
+ }
+}
+
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_U8:
+ if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32));
+ return 0;
+ }
+
+ for (i = 0; !ret && i < ctrl->new_elems; i++)
+ ret = std_validate_elem(ctrl, i, ptr);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
+
+static const struct v4l2_ctrl_type_ops std_type_ops = {
+ .equal = v4l2_ctrl_type_op_equal,
+ .init = v4l2_ctrl_type_op_init,
+ .minimum = v4l2_ctrl_type_op_minimum,
+ .maximum = v4l2_ctrl_type_op_maximum,
+ .log = v4l2_ctrl_type_op_log,
+ .validate = v4l2_ctrl_type_op_validate,
+};
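+
+/*
+ * Drivers that need custom behaviour for one callback can reuse the exported
+ * helpers above for the others. A minimal sketch (foo_validate and
+ * foo_type_ops are hypothetical; the static minimum/maximum helpers are not
+ * exported, so they are not shown here):
+ *
+ *	static const struct v4l2_ctrl_type_ops foo_type_ops = {
+ *		.equal = v4l2_ctrl_type_op_equal,
+ *		.init = v4l2_ctrl_type_op_init,
+ *		.log = v4l2_ctrl_type_op_log,
+ *		.validate = foo_validate,
+ *	};
+ */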
+
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (!ctrl)
+ return;
+ if (!notify) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
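+
+/*
+ * Illustrative use of v4l2_ctrl_notify() (the foo_* names are hypothetical):
+ * a driver can be told about value changes of a control without polling it.
+ *
+ *	static void foo_notify(struct v4l2_ctrl *ctrl, void *priv)
+ *	{
+ *		struct foo_dev *dev = priv;
+ *
+ *		dev->brightness = ctrl->val;
+ *	}
+ *
+ *	v4l2_ctrl_notify(ctrl, foo_notify, dev);
+ */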
+
+/* Copy one control value to another. */
+static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to,
+ unsigned int elems)
+{
+ if (ctrl == NULL)
+ return;
+ memcpy(to.p, from.p_const, elems * ctrl->elem_size);
+}
+
+/* Copy the new value to the current value. */
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ bool changed;
+
+ if (ctrl == NULL)
+ return;
+
+ /* has_changed is set by cluster_changed */
+ changed = ctrl->has_changed;
+ if (changed) {
+ if (ctrl->is_dyn_array)
+ ctrl->elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems);
+ }
+
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
+ ctrl->flags &=
+ ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
+ if (!is_cur_manual(ctrl->cluster[0])) {
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ctrl->cluster[0]->has_volatiles)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ fh = NULL;
+ }
+ if (changed || ch_flags) {
+ /* If a control was changed that was not one of the controls
+ modified by the application, then send the event to all. */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
+ }
+}
+
+/* Copy the current value to the new value */
+void cur_to_new(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+}
+
+static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems)
+{
+ void *tmp;
+
+ if (elems == ref->p_req_array_alloc_elems)
+ return true;
+ if (ref->ctrl->is_dyn_array &&
+ elems < ref->p_req_array_alloc_elems)
+ return true;
+
+ tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL);
+
+ if (!tmp) {
+ ref->p_req_array_enomem = true;
+ return false;
+ }
+ ref->p_req_array_enomem = false;
+ kvfree(ref->p_req.p);
+ ref->p_req.p = tmp;
+ ref->p_req_array_alloc_elems = elems;
+ return true;
+}
+
+/* Copy the new value to the request value */
+void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems))
+ return;
+
+ ref->p_req_elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems);
+ ref->p_req_valid = true;
+}
+
+/* Copy the current value to the request value */
+void cur_to_req(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->elems))
+ return;
+
+ ref->p_req_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems);
+ ref->p_req_valid = true;
+}
+
+/* Copy the request value to the new value */
+int req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return 0;
+
+ ctrl = ref->ctrl;
+
+ /*
+ * This control was never set in the request, so just use the current
+ * value.
+ */
+ if (!ref->p_req_valid) {
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Not an array, so just copy the request value */
+ if (!ctrl->is_array) {
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Sanity check, should never happen */
+ if (WARN_ON(!ref->p_req_array_alloc_elems))
+ return -ENOMEM;
+
+ if (!ctrl->is_dyn_array &&
+ ref->p_req_elems != ctrl->p_array_alloc_elems)
+ return -ENOMEM;
+
+ /*
+ * Check if the number of elements in the request is more than the
+ * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array.
+ * Note that p_array is allocated with twice the number of elements
+ * in the dynamic array since it has to store both the current and
+ * new value of such a control.
+ */
+ if (ref->p_req_elems > ctrl->p_array_alloc_elems) {
+ unsigned int sz = ref->p_req_elems * ctrl->elem_size;
+ void *old = ctrl->p_array;
+ void *tmp = kvzalloc(2 * sz, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + sz;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = ref->p_req_elems;
+ kvfree(old);
+ }
+
+ ctrl->new_elems = ref->p_req_elems;
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
+}
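+
+/*
+ * Illustrative layout note: after the reallocation above, ctrl->p_array is a
+ * single kvzalloc'ed buffer holding both values of the array control:
+ *
+ *	p_array: [ new value, sz bytes ][ current value, sz bytes ]
+ *	          ^ ctrl->p_new.p        ^ ctrl->p_cur.p
+ *
+ * which matches how v4l2_ctrl_new() lays out p_array for array controls.
+ */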
+
+/* Control range checking */
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ fallthrough;
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (step == 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max ||
+ min < 0 || (step && max >= BITS_PER_LONG_LONG))
+ return -ERANGE;
+ /* Note: step == menu_skip_mask for menu controls.
+ So here we check if the default value is masked out. */
+ if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def)))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+/* Set the handler's error code if it wasn't set earlier already */
+static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
+{
+ if (hdl->error == 0)
+ hdl->error = err;
+ return err;
+}
+
+/* Initialize the handler */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint,
+ struct lock_class_key *key, const char *name)
+{
+ mutex_init(&hdl->_lock);
+ hdl->lock = &hdl->_lock;
+ lockdep_set_class_and_name(hdl->lock, key, name);
+ INIT_LIST_HEAD(&hdl->ctrls);
+ INIT_LIST_HEAD(&hdl->ctrl_refs);
+ hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
+ hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
+ GFP_KERNEL);
+ hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ v4l2_ctrl_handler_init_request(hdl);
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
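+
+/*
+ * Typical driver usage is via the v4l2_ctrl_handler_init() wrapper macro,
+ * which supplies the lockdep key and name. A minimal sketch (the foo_ctrl_ops
+ * name is hypothetical):
+ *
+ *	struct v4l2_ctrl_handler hdl;
+ *
+ *	v4l2_ctrl_handler_init(&hdl, 2);
+ *	v4l2_ctrl_new_std(&hdl, &foo_ctrl_ops, V4L2_CID_BRIGHTNESS,
+ *			  0, 255, 1, 128);
+ *	v4l2_ctrl_new_std(&hdl, &foo_ctrl_ops, V4L2_CID_CONTRAST,
+ *			  0, 255, 1, 64);
+ *	if (hdl.error) {
+ *		int err = hdl.error;
+ *
+ *		v4l2_ctrl_handler_free(&hdl);
+ *		return err;
+ *	}
+ */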
+
+/* Free all controls and control refs */
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_ref *ref, *next_ref;
+ struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
+
+ if (!hdl)
+ return 0;
+
+ if (!hdl->buckets)
+ return hdl->error;
+
+ v4l2_ctrl_handler_free_request(hdl);
+
+ mutex_lock(hdl->lock);
+ /* Free all nodes */
+ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
+ list_del(&ref->node);
+ if (ref->p_req_array_alloc_elems)
+ kvfree(ref->p_req.p);
+ kfree(ref);
+ }
+ /* Free all controls owned by the handler */
+ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
+ list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
+ kvfree(ctrl->p_array);
+ kvfree(ctrl);
+ }
+ kvfree(hdl->buckets);
+ hdl->buckets = NULL;
+ hdl->cached = NULL;
+ mutex_unlock(hdl->lock);
+ mutex_destroy(&hdl->_lock);
+
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_free);
+
+/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
+ be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
+ with applications that do not use the NEXT_CTRL flag.
+
+ We just find the n-th private user control. It's O(N), but that should not
+ be an issue in this particular case. */
+static struct v4l2_ctrl_ref *find_private_ref(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+
+ id -= V4L2_CID_PRIVATE_BASE;
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ /* Search for private user controls that are compatible with
+ VIDIOC_G/S_CTRL. */
+ if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
+ V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
+ if (!ref->ctrl->is_int)
+ continue;
+ if (id == 0)
+ return ref;
+ id--;
+ }
+ }
+ return NULL;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+ int bucket;
+
+ id &= V4L2_CTRL_ID_MASK;
+
+ /* Old-style private controls need special handling */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return find_private_ref(hdl, id);
+ bucket = id % hdl->nr_of_buckets;
+
+ /* Simple optimization: cache the last control found */
+ if (hdl->cached && hdl->cached->ctrl->id == id)
+ return hdl->cached;
+
+ /* Not in cache, search the hash */
+ ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
+ while (ref && ref->ctrl->id != id)
+ ref = ref->next;
+
+ if (ref)
+ hdl->cached = ref; /* cache it! */
+ return ref;
+}
+
+/* Find a control with the given ID. Take the handler's lock first. */
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = NULL;
+
+ if (hdl) {
+ mutex_lock(hdl->lock);
+ ref = find_ref(hdl, id);
+ mutex_unlock(hdl->lock);
+ }
+ return ref;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return ref ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_find);
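+
+/*
+ * Illustrative lookup, e.g. to cache a control pointer at probe time (the
+ * priv structure and its handler are hypothetical):
+ *
+ *	priv->gain = v4l2_ctrl_find(&priv->hdl, V4L2_CID_GAIN);
+ *	if (!priv->gain)
+ *		dev_warn(dev, "no gain control\n");
+ */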
+
+/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
+{
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl_ref *new_ref;
+ u32 id = ctrl->id;
+ u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
+ int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
+
+ /*
+ * Automatically add the control class if it is not yet present and
+ * the new control is not a compound control.
+ */
+ if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
+ id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
+ if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
+ return hdl->error;
+
+ if (hdl->error)
+ return hdl->error;
+
+ if (allocate_req && !ctrl->is_array)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
+ if (!new_ref)
+ return handler_set_err(hdl, -ENOMEM);
+ new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(hdl->lock);
+
+ /* Add immediately at the end of the list if the list is empty, or if
+ the last element in the list has a lower ID.
+ This ensures that when elements are added in ascending order the
+ insertion is an O(1) operation. */
+ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
+ list_add_tail(&new_ref->node, &hdl->ctrl_refs);
+ goto insert_in_hash;
+ }
+
+ /* Find insert position in sorted list */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->ctrl->id < id)
+ continue;
+ /* Don't add duplicates */
+ if (ref->ctrl->id == id) {
+ kfree(new_ref);
+ goto unlock;
+ }
+ list_add(&new_ref->node, ref->node.prev);
+ break;
+ }
+
+insert_in_hash:
+ /* Insert the control node in the hash */
+ new_ref->next = hdl->buckets[bucket];
+ hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
+ if (ctrl->handler == hdl) {
+ /* By default each control starts in a cluster of its own.
+ * new_ref->ctrl is basically a cluster array with one
+ * element, so that's perfect to use as the cluster pointer.
+ * But only do this for the handler that owns the control.
+ */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
+
+unlock:
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/* Add a new control */
+static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ const struct v4l2_ctrl_type_ops *type_ops,
+ u32 id, const char *name, enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def,
+ const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int,
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max,
+ void *priv)
+{
+ struct v4l2_ctrl *ctrl;
+ unsigned sz_extra;
+ unsigned nr_of_dims = 0;
+ unsigned elems = 1;
+ bool is_array;
+ unsigned tot_ctrl_size;
+ void *data;
+ int err;
+
+ if (hdl->error)
+ return NULL;
+
+ while (dims && dims[nr_of_dims]) {
+ elems *= dims[nr_of_dims];
+ nr_of_dims++;
+ if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
+ break;
+ }
+ is_array = nr_of_dims > 0;
+
+ /* Prefill elem_size for all types handled by std_type_ops */
+ switch ((u32)type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ elem_size = sizeof(s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ elem_size = max + 1;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ elem_size = sizeof(u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ elem_size = sizeof(u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ elem_size = sizeof(u32);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation);
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_fwht_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_sps);
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pps);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp8_frame);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
+ break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr);
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_frame);
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_av1_sequence);
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_av1_frame);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ elem_size = sizeof(struct v4l2_ctrl_av1_film_grain);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
+ case V4L2_CTRL_TYPE_RECT:
+ elem_size = sizeof(struct v4l2_rect);
+ break;
+ default:
+ if (type < V4L2_CTRL_COMPOUND_TYPES)
+ elem_size = sizeof(s32);
+ break;
+ }
+
+ if (type < V4L2_CTRL_COMPOUND_TYPES &&
+ type != V4L2_CTRL_TYPE_BUTTON &&
+ type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ type != V4L2_CTRL_TYPE_STRING)
+ flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX;
+
+ /* Sanity checks */
+ if (id == 0 || name == NULL || !elem_size ||
+ id >= V4L2_CID_PRIVATE_BASE ||
+ (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
+ return NULL;
+ }
+ if (is_array &&
+ (type == V4L2_CTRL_TYPE_BUTTON ||
+ type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) {
+ /*
+		 * For now this is only supported for one-dimensional arrays.
+		 *
+		 * This can be relaxed in the future, but that will
+		 * require more effort.
+ */
+ if (nr_of_dims != 1) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ /* Start with just 1 element */
+ elems = 1;
+ }
+
+ tot_ctrl_size = elem_size * elems;
+ sz_extra = 0;
+ if (type == V4L2_CTRL_TYPE_BUTTON)
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ else if (!is_array &&
+ (type == V4L2_CTRL_TYPE_INTEGER64 ||
+ type == V4L2_CTRL_TYPE_STRING ||
+ type >= V4L2_CTRL_COMPOUND_TYPES))
+ sz_extra += 2 * tot_ctrl_size;
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_min.p_const)
+ sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_max.p_const)
+ sz_extra += elem_size;
+
+ ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -ENOMEM);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
+ ctrl->handler = hdl;
+ ctrl->ops = ops;
+ ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
+ ctrl->id = id;
+ ctrl->name = name;
+ ctrl->type = type;
+ ctrl->flags = flags;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
+ ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
+ ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
+ ctrl->is_array = is_array;
+ ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY);
+ ctrl->elems = elems;
+ ctrl->new_elems = elems;
+ ctrl->nr_of_dims = nr_of_dims;
+ if (nr_of_dims)
+ memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
+ ctrl->elem_size = elem_size;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
+ ctrl->priv = priv;
+ ctrl->cur.val = ctrl->val = def;
+ data = &ctrl[1];
+
+ if (ctrl->is_array) {
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
+ if (!ctrl->p_array) {
+ kvfree(ctrl);
+ return NULL;
+ }
+ data = ctrl->p_array;
+ }
+
+ if (!ctrl->is_int) {
+ ctrl->p_new.p = data;
+ ctrl->p_cur.p = data + tot_ctrl_size;
+ } else {
+ ctrl->p_new.p = &ctrl->val;
+ ctrl->p_cur.p = &ctrl->cur.val;
+ }
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ if (ctrl->is_array)
+ ctrl->p_def.p = &ctrl[1];
+ else
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
+ if (flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) {
+ void *ptr = ctrl->p_def.p;
+
+ if (p_min.p_const) {
+ ptr += elem_size;
+ ctrl->p_min.p = ptr;
+ memcpy(ctrl->p_min.p, p_min.p_const, elem_size);
+ }
+
+ if (p_max.p_const) {
+ ptr += elem_size;
+ ctrl->p_max.p = ptr;
+ memcpy(ctrl->p_max.p, p_max.p_const, elem_size);
+ }
+ }
+
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
+ cur_to_new(ctrl);
+
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
+ kvfree(ctrl->p_array);
+ kvfree(ctrl);
+ return NULL;
+ }
+ mutex_lock(hdl->lock);
+ list_add_tail(&ctrl->node, &hdl->ctrls);
+ mutex_unlock(hdl->lock);
+ return ctrl;
+}
+
+struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_config *cfg, void *priv)
+{
+ bool is_menu;
+ struct v4l2_ctrl *ctrl;
+ const char *name = cfg->name;
+ const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
+ enum v4l2_ctrl_type type = cfg->type;
+ u32 flags = cfg->flags;
+ s64 min = cfg->min;
+ s64 max = cfg->max;
+ u64 step = cfg->step;
+ s64 def = cfg->def;
+
+ if (name == NULL)
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
+ type, min, max,
+ is_menu ? cfg->menu_skip_mask : step, def,
+ cfg->dims, cfg->elem_size,
+ flags, qmenu, qmenu_int, cfg->p_def, cfg->p_min,
+ cfg->p_max, priv);
+ if (ctrl)
+ ctrl->is_private = cfg->is_private;
+ return ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_custom);
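+
+/*
+ * An illustrative driver-private control created with v4l2_ctrl_new_custom()
+ * (the control id, name and foo_* symbols below are hypothetical):
+ *
+ *	static const struct v4l2_ctrl_config foo_filter_ctrl = {
+ *		.ops = &foo_ctrl_ops,
+ *		.id = V4L2_CID_USER_BASE + 0x1000,
+ *		.name = "Noise Filter Strength",
+ *		.type = V4L2_CTRL_TYPE_INTEGER,
+ *		.min = 0,
+ *		.max = 15,
+ *		.step = 1,
+ *		.def = 0,
+ *	};
+ *
+ *	v4l2_ctrl_new_custom(&hdl, &foo_filter_ctrl, NULL);
+ */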
+
+/* Helper function for standard non-menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s64 min, s64 max, u64 step, s64 def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
+ type >= V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std);
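+
+/*
+ * For example (foo_ctrl_ops is hypothetical), a boolean horizontal flip
+ * control with default 0:
+ *
+ *	v4l2_ctrl_new_std(&hdl, &foo_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ */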
+
+/* Helper function for standard menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u64 mask, u8 _def)
+{
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+ u64 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
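+
+/*
+ * Illustrative sketch, not part of this change: for standard menu controls
+ * the caller passes the highest valid menu index, a skip mask whose set
+ * bits mark menu items the hardware does not implement (carried in the
+ * 'step' slot of v4l2_ctrl_new() above; 0 here), and the default item.
+ * The handler and 'foo_ctrl_ops' are hypothetical.
+ *
+ *	v4l2_ctrl_new_std_menu(&foo->hdl, &foo_ctrl_ops,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ *			       V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ */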
+
+/* Helper function for standard menu controls with driver defined menu */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
+ u64 mask, u8 _def, const char * const *qmenu)
+{
+ enum v4l2_ctrl_type type;
+ const char *name;
+ u32 flags;
+ u64 step;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+
+	/*
+	 * v4l2_ctrl_new_std_menu_items() should only be called for
+	 * standard controls without a standard menu.
+	 */
+ if (v4l2_ctrl_get_menu(id)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
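+
+/*
+ * Illustrative sketch, not part of this change: a driver-defined menu for a
+ * standard menu control without a standard menu, e.g. V4L2_CID_TEST_PATTERN.
+ * The menu strings, handler and 'foo_ctrl_ops' are hypothetical.
+ *
+ *	static const char * const foo_test_pattern_menu[] = {
+ *		"Disabled",
+ *		"Vertical Color Bars",
+ *		"Solid Black",
+ *	};
+ *
+ *	v4l2_ctrl_new_std_menu_items(&foo->hdl, &foo_ctrl_ops,
+ *				     V4L2_CID_TEST_PATTERN,
+ *				     ARRAY_SIZE(foo_test_pattern_menu) - 1,
+ *				     0, 0, foo_test_pattern_menu);
+ */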
+
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, p_min, p_max, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
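+
+/*
+ * Illustrative sketch, not part of this change: compound controls take their
+ * default (and optional minimum/maximum) payloads via union v4l2_ctrl_ptr,
+ * typically wrapped with v4l2_ctrl_ptr_create(). The structure contents,
+ * handler and 'foo_ctrl_ops' are hypothetical; unused limits are assumed
+ * here to be passed as v4l2_ctrl_ptr_create(NULL).
+ *
+ *	static const struct v4l2_area foo_unit_size = {
+ *		.width	= 3450,
+ *		.height	= 3450,
+ *	};
+ *
+ *	v4l2_ctrl_new_std_compound(&foo->hdl, &foo_ctrl_ops,
+ *				   V4L2_CID_UNIT_CELL_SIZE,
+ *				   v4l2_ctrl_ptr_create((void *)&foo_unit_size),
+ *				   v4l2_ctrl_ptr_create(NULL),
+ *				   v4l2_ctrl_ptr_create(NULL));
+ */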
+
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ u64 step;
+ s64 max = _max;
+ s64 def = _def;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, 0, def, NULL, 0,
+ flags, NULL, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
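+
+/*
+ * Illustrative sketch, not part of this change: integer menu controls such
+ * as V4L2_CID_LINK_FREQ take a driver-provided s64 item array; the maximum
+ * is the last valid index. 'foo_link_freqs', the handler and 'foo_ctrl_ops'
+ * are hypothetical.
+ *
+ *	static const s64 foo_link_freqs[] = {
+ *		297000000,
+ *		594000000,
+ *	};
+ *
+ *	v4l2_ctrl_new_int_menu(&foo->hdl, &foo_ctrl_ops, V4L2_CID_LINK_FREQ,
+ *			       ARRAY_SIZE(foo_link_freqs) - 1, 0,
+ *			       foo_link_freqs);
+ */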
+
+/* Add the controls from another handler to our own. */
+int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *add,
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
+{
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ /* Do nothing if either handler is NULL or if they are the same */
+ if (!hdl || !add || hdl == add)
+ return 0;
+ if (hdl->error)
+ return hdl->error;
+ mutex_lock(add->lock);
+ list_for_each_entry(ref, &add->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+
+ /* Skip handler-private controls. */
+ if (ctrl->is_private)
+ continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
+ /* Filter any unwanted controls */
+ if (filter && !filter(ctrl))
+ continue;
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
+ if (ret)
+ break;
+ }
+ mutex_unlock(add->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_handler);
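+
+/*
+ * Illustrative sketch, not part of this change: a bridge driver typically
+ * inherits the controls of its sub-devices by adding their handlers to its
+ * own, optionally restricted by a filter such as v4l2_ctrl_radio_filter()
+ * below. 'bridge' and 'sensor_sd' are hypothetical.
+ *
+ *	err = v4l2_ctrl_add_handler(&bridge->hdl, sensor_sd->ctrl_handler,
+ *				    NULL, true);
+ *	if (err)
+ *		return err;
+ */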
+
+bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
+{
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
+ return true;
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+ return true;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
+
+/* Cluster controls */
+void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
+{
+ bool has_volatiles = false;
+ int i;
+
+ /* The first control is the master control and it must not be NULL */
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
+
+ for (i = 0; i < ncontrols; i++) {
+ if (controls[i]) {
+ controls[i]->cluster = controls;
+ controls[i]->ncontrols = ncontrols;
+ if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
+ has_volatiles = true;
+ }
+ }
+ controls[0]->has_volatiles = has_volatiles;
+}
+EXPORT_SYMBOL(v4l2_ctrl_cluster);
+
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag = 0;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
+ master->is_auto = true;
+ master->has_volatiles = set_volatile;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ if (!is_cur_manual(master))
+ flag = V4L2_CTRL_FLAG_INACTIVE |
+ (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i])
+ controls[i]->flags |= flag;
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
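+
+/*
+ * Illustrative sketch, not part of this change: the classic autogain/gain
+ * autocluster. 'autogain' and 'gain' are assumed to be adjacent
+ * struct v4l2_ctrl * members of a hypothetical driver state 'foo';
+ * manual_val 0 means that value 0 of the master selects manual mode, and
+ * set_volatile makes the gain volatile while autogain is on (which
+ * requires g_volatile_ctrl in 'foo_ctrl_ops').
+ *
+ *	foo->autogain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *					  V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ *	foo->gain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *				      V4L2_CID_GAIN, 0, 255, 1, 64);
+ *	v4l2_ctrl_auto_cluster(2, &foo->autogain, 0, true);
+ */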
+
+/*
+ * Obtain the current volatile values of an autocluster and mark them
+ * as new.
+ */
+void update_from_auto_cluster(struct v4l2_ctrl *master)
+{
+ int i;
+
+ for (i = 1; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ if (!call_op(master, g_volatile_ctrl))
+ for (i = 1; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 1;
+}
+
+/*
+ * Return non-zero if one or more of the controls in the cluster has a new
+ * value that differs from the current value.
+ */
+static int cluster_changed(struct v4l2_ctrl *master)
+{
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+ bool ctrl_changed = false;
+
+ if (!ctrl)
+ continue;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) {
+ changed = true;
+ ctrl_changed = true;
+ }
+
+ /*
+ * Set has_changed to false to avoid generating
+ * the event V4L2_EVENT_CTRL_CH_VALUE
+ */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ctrl->has_changed = false;
+ continue;
+ }
+
+ if (ctrl->elems != ctrl->new_elems)
+ ctrl_changed = true;
+ if (!ctrl_changed)
+ ctrl_changed = !ctrl->type_ops->equal(ctrl,
+ ctrl->p_cur, ctrl->p_new);
+ ctrl->has_changed = ctrl_changed;
+ changed |= ctrl->has_changed;
+ }
+ return changed;
+}
+
+/*
+ * Core function that calls try/s_ctrl and ensures that the new value is
+ * copied to the current value on a set.
+ * Must be called with ctrl->handler->lock held.
+ */
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
+{
+ bool update_flag;
+ int ret;
+ int i;
+
+	/*
+	 * Go through the cluster and either validate the new value or,
+	 * if no new value was set, copy the current value to the new
+	 * value, ensuring a consistent view for the control ops when
+	 * called.
+	 */
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (!ctrl)
+ continue;
+
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
+ continue;
+ }
+ /*
+ * Check again: it may have changed since the
+ * previous check in try_or_set_ext_ctrls().
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ }
+
+ ret = call_op(master, try_ctrl);
+
+ /* Don't set if there is no change */
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+
+ for (i = 0; i < master->ncontrols; i++) {
+ /*
+ * If we switch from auto to manual mode, and this cluster
+ * contains volatile controls, then all non-master controls
+ * have to be marked as changed. The 'new' value contains
+ * the volatile value (obtained by update_from_auto_cluster),
+ * which now has to become the current value.
+ */
+ if (i && update_flag && is_new_manual(master) &&
+ master->has_volatiles && master->cluster[i])
+ master->cluster[i]->has_changed = true;
+
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
+ return 0;
+}
+
+/* Activate/deactivate a control. */
+void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
+{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ if (inactive)
+ /* set V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_set_bit(4, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(v4l2_ctrl_activate);
+
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (grabbed)
+ /* set V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_set_bit(1, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_grab);
+
+/* Call s_ctrl for all controls owned by the handler */
+int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (hdl == NULL)
+ return 0;
+
+ lockdep_assert_held(hdl->lock);
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ ctrl->done = false;
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node) {
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int i;
+
+ /* Skip if this control was already handled by a cluster. */
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
+ }
+ }
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
+
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ int ret;
+
+ if (hdl == NULL)
+ return 0;
+
+ mutex_lock(hdl->lock);
+ ret = __v4l2_ctrl_handler_setup(hdl);
+ mutex_unlock(hdl->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
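+
+/*
+ * Illustrative sketch, not part of this change: drivers usually check the
+ * handler error after registering all controls and then push the initial
+ * control values to the hardware once, typically at the end of probe or at
+ * stream-on. 'foo' is a hypothetical driver state.
+ *
+ *	if (foo->hdl.error) {
+ *		err = foo->hdl.error;
+ *		goto err_free_handler;
+ *	}
+ *	v4l2_ctrl_handler_setup(&foo->hdl);
+ */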
+
+/* Log the control name and value */
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ const char *prefix, const char *colon)
+{
+ if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+ return;
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return;
+
+ pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+
+ ctrl->type_ops->log(ctrl);
+
+ if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ V4L2_CTRL_FLAG_GRABBED |
+ V4L2_CTRL_FLAG_VOLATILE)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ pr_cont(" inactive");
+ if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ pr_cont(" grabbed");
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
+ pr_cont(" volatile");
+ }
+ pr_cont("\n");
+}
+
+/* Log all controls owned by the handler */
+void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ const char *prefix)
+{
+ struct v4l2_ctrl *ctrl;
+ const char *colon = "";
+ int len;
+
+ if (!hdl)
+ return;
+ if (!prefix)
+ prefix = "";
+ len = strlen(prefix);
+ if (len && prefix[len - 1] != ' ')
+ colon = ": ";
+ mutex_lock(hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
+ mutex_unlock(hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+
+int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ctrl_ops,
+ const struct v4l2_fwnode_device_properties *p)
+{
+ if (hdl->error)
+ return hdl->error;
+
+ if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
+ u32 orientation_ctrl;
+
+ switch (p->orientation) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
+ break;
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
+ break;
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
+ orientation_ctrl))
+ return hdl->error;
+ }
+
+ if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
+ if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ p->rotation, p->rotation, 1,
+ p->rotation))
+ return hdl->error;
+ }
+
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
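+
+/*
+ * Illustrative sketch, not part of this change: sensor drivers usually parse
+ * the firmware properties first (v4l2_fwnode_device_parse() from
+ * <media/v4l2-fwnode.h>) and then register the matching controls. 'dev',
+ * 'foo' and 'foo_ctrl_ops' are hypothetical.
+ *
+ *	struct v4l2_fwnode_device_properties props;
+ *
+ *	err = v4l2_fwnode_device_parse(dev, &props);
+ *	if (err)
+ *		return err;
+ *
+ *	err = v4l2_ctrl_new_fwnode_properties(&foo->hdl, &foo_ctrl_ops,
+ *					      &props);
+ */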
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
new file mode 100644
index 000000000000..ad41f65374e2
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework control definitions.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
+ */
+
+#include <linux/export.h>
+#include <media/v4l2-ctrls.h>
+
+/*
+ * Returns NULL or a character pointer array containing the menu for
+ * the given control ID. The pointer array ends with a NULL pointer.
+ * An empty string signifies a menu entry that is invalid. This allows
+ * drivers to disable certain options if they are not supported.
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id)
+{
+ static const char * const mpeg_audio_sampling_freq[] = {
+ "44.1 kHz",
+ "48 kHz",
+ "32 kHz",
+ NULL
+ };
+ static const char * const mpeg_audio_encoding[] = {
+ "MPEG-1/2 Layer I",
+ "MPEG-1/2 Layer II",
+ "MPEG-1/2 Layer III",
+ "MPEG-2/4 AAC",
+ "AC-3",
+ NULL
+ };
+ static const char * const mpeg_audio_l1_bitrate[] = {
+ "32 kbps",
+ "64 kbps",
+ "96 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "288 kbps",
+ "320 kbps",
+ "352 kbps",
+ "384 kbps",
+ "416 kbps",
+ "448 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l2_bitrate[] = {
+ "32 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_ac3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ "448 kbps",
+ "512 kbps",
+ "576 kbps",
+ "640 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_mode[] = {
+ "Stereo",
+ "Joint Stereo",
+ "Dual",
+ "Mono",
+ NULL
+ };
+ static const char * const mpeg_audio_mode_extension[] = {
+ "Bound 4",
+ "Bound 8",
+ "Bound 12",
+ "Bound 16",
+ NULL
+ };
+ static const char * const mpeg_audio_emphasis[] = {
+ "No Emphasis",
+ "50/15 us",
+ "CCITT J17",
+ NULL
+ };
+ static const char * const mpeg_audio_crc[] = {
+ "No CRC",
+ "16-bit CRC",
+ NULL
+ };
+ static const char * const mpeg_audio_dec_playback[] = {
+ "Auto",
+ "Stereo",
+ "Left",
+ "Right",
+ "Mono",
+ "Swapped Stereo",
+ NULL
+ };
+ static const char * const mpeg_video_encoding[] = {
+ "MPEG-1",
+ "MPEG-2",
+ "MPEG-4 AVC",
+ NULL
+ };
+ static const char * const mpeg_video_aspect[] = {
+ "1x1",
+ "4x3",
+ "16x9",
+ "2.21x1",
+ NULL
+ };
+ static const char * const mpeg_video_bitrate_mode[] = {
+ "Variable Bitrate",
+ "Constant Bitrate",
+ "Constant Quality",
+ NULL
+ };
+ static const char * const mpeg_stream_type[] = {
+ "MPEG-2 Program Stream",
+ "MPEG-2 Transport Stream",
+ "MPEG-1 System Stream",
+ "MPEG-2 DVD-compatible Stream",
+ "MPEG-1 VCD-compatible Stream",
+ "MPEG-2 SVCD-compatible Stream",
+ NULL
+ };
+ static const char * const mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private Packet, IVTV Format",
+ NULL
+ };
+ static const char * const camera_power_line_frequency[] = {
+ "Disabled",
+ "50 Hz",
+ "60 Hz",
+ "Auto",
+ NULL
+ };
+ static const char * const camera_exposure_auto[] = {
+ "Auto Mode",
+ "Manual Mode",
+ "Shutter Priority Mode",
+ "Aperture Priority Mode",
+ NULL
+ };
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ "Matrix",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
+ static const char * const colorfx[] = {
+ "None",
+ "Black & White",
+ "Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
+ "Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
+ NULL
+ };
+ static const char * const tune_emphasis[] = {
+ "None",
+ "50 Microseconds",
+ "75 Microseconds",
+ NULL,
+ };
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ "5.2",
+ "6.0",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Stereo High",
+ "Multiview High",
+ "Constrained High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const h264_fp_arrangement_type[] = {
+ "Checkerboard",
+ "Column",
+ "Row",
+ "Side by Side",
+ "Top Bottom",
+ "Temporal",
+ NULL,
+ };
+ static const char * const h264_fmo_map_type[] = {
+ "Interleaved Slices",
+ "Scattered Slices",
+ "Foreground with Leftover",
+ "Box Out",
+ "Raster Scan",
+ "Wipe Scan",
+ "Explicit",
+ NULL,
+ };
+ static const char * const h264_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const h264_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const h264_hierarchical_coding_type[] = {
+ "Hier Coding B",
+ "Hier Coding P",
+ NULL,
+ };
+ static const char * const mpeg_mpeg2_level[] = {
+ "Low",
+ "Main",
+ "High 1440",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg2_profile[] = {
+ "Simple",
+ "Main",
+ "SNR Scalable",
+ "Spatially Scalable",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Advanced Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
+ NULL,
+ };
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_level[] = {
+ "1",
+ "1.1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
+
+ static const char * const jpeg_chroma_subsampling[] = {
+ "4:4:4",
+ "4:2:2",
+ "4:2:0",
+ "4:1:1",
+ "4:1:0",
+ "Gray",
+ NULL,
+ };
+ static const char * const dv_tx_mode[] = {
+ "DVI-D",
+ "HDMI",
+ NULL,
+ };
+ static const char * const dv_rgb_range[] = {
+ "Automatic",
+ "RGB Limited Range (16-235)",
+ "RGB Full Range (0-255)",
+ NULL,
+ };
+ static const char * const dv_it_content_type[] = {
+ "Graphics",
+ "Photo",
+ "Cinema",
+ "Game",
+ "No IT Content",
+ NULL,
+ };
+ static const char * const detect_md_mode[] = {
+ "Disabled",
+ "Global",
+ "Threshold Grid",
+ "Region Grid",
+ NULL,
+ };
+
+ static const char * const av1_profile[] = {
+ "Main",
+ "High",
+ "Professional",
+ NULL,
+ };
+ static const char * const av1_level[] = {
+ "2.0",
+ "2.1",
+ "2.2",
+ "2.3",
+ "3.0",
+ "3.1",
+ "3.2",
+ "3.3",
+ "4.0",
+ "4.1",
+ "4.2",
+ "4.3",
+ "5.0",
+ "5.1",
+ "5.2",
+ "5.3",
+ "6.0",
+ "6.1",
+ "6.2",
+ "6.3",
+ "7.0",
+ "7.1",
+ "7.2",
+ "7.3",
+ NULL,
+ };
+
+ static const char * const hevc_profile[] = {
+ "Main",
+ "Main Still Picture",
+ "Main 10",
+ NULL,
+ };
+ static const char * const hevc_level[] = {
+ "1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+	static const char * const hevc_hierarchical_coding_type[] = {
+ "B",
+ "P",
+ NULL,
+ };
+ static const char * const hevc_refresh_type[] = {
+ "None",
+ "CRA",
+ "IDR",
+ NULL,
+ };
+ static const char * const hevc_size_of_length_field[] = {
+ "0",
+ "1",
+ "2",
+ "4",
+ NULL,
+ };
+ static const char * const hevc_tier[] = {
+ "Main",
+ "High",
+ NULL,
+ };
+ static const char * const hevc_loop_filter_mode[] = {
+ "Disabled",
+ "Enabled",
+ "Disabled at slice boundary",
+		NULL,
+ };
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const camera_orientation[] = {
+ "Front",
+ "Back",
+ "External",
+ NULL,
+ };
+ static const char * const mpeg_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+ static const char * const intra_refresh_period_type[] = {
+ "Random",
+ "Cyclic",
+ NULL,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ return mpeg_audio_sampling_freq;
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ return mpeg_audio_encoding;
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ return mpeg_audio_l1_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ return mpeg_audio_l2_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ return mpeg_audio_l3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ return mpeg_audio_ac3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ return mpeg_audio_mode;
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ return mpeg_audio_mode_extension;
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ return mpeg_audio_emphasis;
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ return mpeg_audio_crc;
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ return mpeg_audio_dec_playback;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return mpeg_video_encoding;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return mpeg_video_aspect;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return mpeg_video_bitrate_mode;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return camera_power_line_frequency;
+ case V4L2_CID_EXPOSURE_AUTO:
+ return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
+ case V4L2_CID_COLORFX:
+ return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return mpeg_video_frame_skip;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ return h264_fp_arrangement_type;
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ return h264_fmo_map_type;
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ return h264_decode_mode;
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ return h264_start_code;
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ return h264_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ return mpeg_mpeg2_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ return mpeg2_profile;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ return vp9_level;
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ return jpeg_chroma_subsampling;
+ case V4L2_CID_DV_TX_MODE:
+ return dv_tx_mode;
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ return dv_rgb_range;
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ return dv_it_content_type;
+ case V4L2_CID_DETECT_MD_MODE:
+ return detect_md_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ return hevc_profile;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ return hevc_level;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+		return hevc_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ return hevc_refresh_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ return hevc_size_of_length_field;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ return hevc_tier;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ return hevc_loop_filter_mode;
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ return av1_profile;
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
+ return av1_level;
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
+ return hevc_start_code;
+ case V4L2_CID_CAMERA_ORIENTATION:
+ return camera_orientation;
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
+ return intra_refresh_period_type;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_menu);
+
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); })
+/*
+ * Returns NULL or an s64 type array containing the menu for the given
+ * control ID. The total number of menu items is returned in @len.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
+
+/* Return the control name. */
+const char *v4l2_ctrl_get_name(u32 id)
+{
+ switch (id) {
+ /* USER controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_USER_CLASS: return "User Controls";
+ case V4L2_CID_BRIGHTNESS: return "Brightness";
+ case V4L2_CID_CONTRAST: return "Contrast";
+ case V4L2_CID_SATURATION: return "Saturation";
+ case V4L2_CID_HUE: return "Hue";
+ case V4L2_CID_AUDIO_VOLUME: return "Volume";
+ case V4L2_CID_AUDIO_BALANCE: return "Balance";
+ case V4L2_CID_AUDIO_BASS: return "Bass";
+ case V4L2_CID_AUDIO_TREBLE: return "Treble";
+ case V4L2_CID_AUDIO_MUTE: return "Mute";
+ case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
+ case V4L2_CID_BLACK_LEVEL: return "Black Level";
+ case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
+ case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
+ case V4L2_CID_RED_BALANCE: return "Red Balance";
+ case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
+ case V4L2_CID_GAMMA: return "Gamma";
+ case V4L2_CID_EXPOSURE: return "Exposure";
+ case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
+ case V4L2_CID_GAIN: return "Gain";
+ case V4L2_CID_HFLIP: return "Horizontal Flip";
+ case V4L2_CID_VFLIP: return "Vertical Flip";
+ case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
+ case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
+ case V4L2_CID_SHARPNESS: return "Sharpness";
+ case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
+ case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_COLOR_KILLER: return "Color Killer";
+ case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
+ case V4L2_CID_ROTATE: return "Rotate";
+ case V4L2_CID_BG_COLOR: return "Background Color";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
+ case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
+ case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB";
+
+ /*
+ * Codec controls
+ *
+ * The MPEG controls are applicable to all codec controls
+ * and the 'MPEG' part of the define is historical.
+ *
+ * Keep the order of the 'case's the same as in videodev2.h!
+ */
+ case V4L2_CID_CODEC_CLASS: return "Codec Controls";
+ case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
+ case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
+ case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
+ case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
+ case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
+ case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
+ case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
+ case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
+ case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
+ case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
+ case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
+ case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type";
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable";
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
+ return "H264 Set QP Value for HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ return "H264 Constrained Intra Pred";
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color";
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count";
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index";
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames";
+ case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value";
+ case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
+ case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
+
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
+
+ /* HEVC controls */
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
+ case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID";
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing";
+ case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
+ case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+
+ /* AV1 controls */
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile";
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level";
+
+ /* CAMERA controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
+ case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
+ case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
+ case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
+ case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
+ case V4L2_CID_PAN_RESET: return "Pan, Reset";
+ case V4L2_CID_TILT_RESET: return "Tilt, Reset";
+ case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
+ case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
+ case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
+ case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
+ case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
+ case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
+ case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
+ case V4L2_CID_PRIVACY: return "Privacy";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
+ case V4L2_CID_PAN_SPEED: return "Pan, Speed";
+ case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
+ case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
+ case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
+ case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode";
+
+ /* FM Radio Modulator controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
+ case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
+ case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
+ case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
+ case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
+ case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
+ case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
+ case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
+ case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
+ case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
+ case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+
+ /* Flash controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
+
+ /* JPEG encoder controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
+ case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
+ case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+
+ /* Image source controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+ case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
+ case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+ case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
+
+ /* Image processing controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+ case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
+ case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
+
+ /* DV controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DV_CLASS: return "Digital Video Controls";
+ case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
+ case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
+ case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
+ case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
+ case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
+ case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
+ case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
+
+ case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
+ case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
+ case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
+ case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
+
+ /* Detection controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DETECT_CLASS: return "Detection Controls";
+ case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
+ case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
+
+ /* Stateless Codec controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls";
+ case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode";
+ case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code";
+ case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set";
+ case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set";
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters";
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters";
+ case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters";
+ case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters";
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header";
+ case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header";
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates";
+ case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code";
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets";
+ case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters";
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry";
+ case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters";
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain";
+
+ /* Colorimetry controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls";
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info";
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_name);
+
+void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
+{
+ *name = v4l2_ctrl_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HUE_AUTO:
+ case V4L2_CID_CHROMA_AGC:
+ case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_AUTOBRIGHTNESS:
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_PRIVACY:
+ case V4L2_CID_AUDIO_LIMITER_ENABLED:
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
+ case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ case V4L2_CID_RDS_TX_MONO_STEREO:
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
+ case V4L2_CID_RDS_TX_COMPRESSED:
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY:
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ case V4L2_CID_ROTATE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
+ case V4L2_CID_DO_WHITE_BALANCE:
+ *type = V4L2_CTRL_TYPE_BUTTON;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_DV_TX_MODE:
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_DEINTERLACING_MODE:
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ case V4L2_CID_DETECT_MD_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
+ case V4L2_CID_HDR_SENSOR_MODE:
+ *type = V4L2_CTRL_TYPE_MENU;
+ break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ *type = V4L2_CTRL_TYPE_STRING;
+ break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_USER_CLASS:
+ case V4L2_CID_CAMERA_CLASS:
+ case V4L2_CID_CODEC_CLASS:
+ case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
+ case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
+ case V4L2_CID_DV_CLASS:
+ case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
+ case V4L2_CID_DETECT_CLASS:
+ case V4L2_CID_CODEC_STATELESS_CLASS:
+ case V4L2_CID_COLORIMETRY_CLASS:
+ *type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ /* You can neither read nor write these */
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_BG_COLOR:
+ case V4L2_CID_COLORFX_RGB:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+	/* Max is calculated as RGB888, that is 2^24 - 1 */
+ *max = 0xffffff;
+ break;
+ case V4L2_CID_COLORFX_CBCR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ *max = 0xffff;
+ break;
+ case V4L2_CID_FLASH_FAULT:
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x1ffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x7fffffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *min = 0;
+ /* default for 8 bit black, luma is 16, chroma is 128 */
+ *def = 0x8000800010LL;
+ *max = 0xffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_AVERAGE_QP:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_PIXEL_RATE:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_DETECT_MD_REGION_GRID:
+ *type = V4L2_CTRL_TYPE_U8;
+ break;
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+ *type = V4L2_CTRL_TYPE_U16;
+ break;
+ case V4L2_CID_RDS_TX_ALT_FREQS:
+ *type = V4L2_CTRL_TYPE_U32;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_PICTURE:
+ *type = V4L2_CTRL_TYPE_MPEG2_PICTURE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION;
+ break;
+ case V4L2_CID_STATELESS_FWHT_PARAMS:
+ *type = V4L2_CTRL_TYPE_FWHT_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_SPS:
+ *type = V4L2_CTRL_TYPE_H264_SPS;
+ break;
+ case V4L2_CID_STATELESS_H264_PPS:
+ *type = V4L2_CTRL_TYPE_H264_PPS;
+ break;
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
+ break;
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS:
+ *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
+ break;
+ case V4L2_CID_STATELESS_VP8_FRAME:
+ *type = V4L2_CTRL_TYPE_VP8_FRAME;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
+ break;
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS:
+ *type = V4L2_CTRL_TYPE_U32;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR:
+ *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR;
+ break;
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ *type = V4L2_CTRL_TYPE_VP9_FRAME;
+ break;
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_AV1_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY:
+ *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_AV1_FRAME:
+ *type = V4L2_CTRL_TYPE_AV1_FRAME;
+ break;
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN:
+ *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
+ *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
+ *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
+ case V4L2_CID_RDS_TX_DEVIATION:
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION:
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN:
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ case V4L2_CID_PILOT_TONE_FREQUENCY:
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_RF_GAIN:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ case V4L2_CID_TILT_RELATIVE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
+ case V4L2_CID_ZOOM_RELATIVE:
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_RDS_RX_PTY:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_CAMERA_SENSOR_ROTATION:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_fill);
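
The v4l2_ctrl_fill() prototype above is the whole contract: the caller passes a control ID and receives the standard name, type, range hints and flags through the output pointers. Below is a minimal sketch of a debug helper built only on that prototype; the helper name is hypothetical, and since v4l2_ctrl_fill() leaves the range untouched for plain integer controls (the caller of v4l2_ctrl_new_std() supplies it), the locals are pre-initialised.

/*
 * Sketch only (not part of this patch): dump the framework defaults
 * for a standard control ID using the v4l2_ctrl_fill() prototype above.
 * For many integer controls the range is supplied by the caller of
 * v4l2_ctrl_new_std(), so min/max/step/def are zero-initialised here.
 */
static void dump_std_ctrl(u32 id)
{
	const char *name;
	enum v4l2_ctrl_type type;
	s64 min = 0, max = 0, def = 0;
	u64 step = 0;
	u32 flags;

	v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
	pr_info("%s: type %u, range [%lld..%lld], step %llu, def %lld, flags 0x%x\n",
		name ? name : "(unknown)", (unsigned int)type,
		min, max, step, def, flags);
}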
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
new file mode 100644
index 000000000000..f4cf273ecf30
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 controls framework private header.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
+ */
+
+#ifndef _V4L2_CTRLS_PRIV_H_
+#define _V4L2_CTRLS_PRIV_H_
+
+#define dprintk(vdev, fmt, arg...) do { \
+ if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
+ printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \
+ __func__, video_device_node_name(vdev), ##arg); \
+} while (0)
+
+#define has_op(master, op) \
+ ((master)->ops && (master)->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? (master)->ops->op(master) : 0)
+
+static inline u32 node2id(struct list_head *node)
+{
+ return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
+}
+
+/*
+ * Small helper function to determine if the autocluster is set to manual
+ * mode.
+ */
+static inline bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/*
+ * Small helper function to determine if the autocluster will be set to manual
+ * mode.
+ */
+static inline bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
+
+static inline u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
+/* v4l2-ctrls-core.c */
+void cur_to_new(struct v4l2_ctrl *ctrl);
+void cur_to_req(struct v4l2_ctrl_ref *ref);
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags);
+void new_to_req(struct v4l2_ctrl_ref *ref);
+int req_to_new(struct v4l2_ctrl_ref *ref);
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl);
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes);
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req);
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id);
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id);
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def);
+void update_from_auto_cluster(struct v4l2_ctrl *master);
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags);
+
+/* v4l2-ctrls-api.c */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev);
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set);
+
+/* v4l2-ctrls-request.c */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl);
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl);
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs);
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set);
+
+#endif
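
is_cur_manual() and is_new_manual() above only mean something for an auto cluster: a group of controls whose first member is the auto/manual switch and whose manual_mode_value records which value means "manual". A hedged sketch of how such a cluster is normally created with the public v4l2_ctrl_auto_cluster() helper follows; the my_state structure and the choice of the autogain/gain pair are illustrative only.

/*
 * Sketch, not part of this patch. The cluster array must live as long
 * as the handler because the framework stores a pointer to it.
 */
struct my_state {
	struct v4l2_ctrl_handler hdl;
	struct v4l2_ctrl *gain_ctrls[2];	/* [0] = autogain, [1] = gain */
};

static void sketch_autogain_cluster(struct my_state *s,
				    const struct v4l2_ctrl_ops *ops)
{
	s->gain_ctrls[0] = v4l2_ctrl_new_std(&s->hdl, ops,
					     V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	s->gain_ctrls[1] = v4l2_ctrl_new_std(&s->hdl, ops,
					     V4L2_CID_GAIN, 0, 255, 1, 128);
	/*
	 * manual_mode_value == 0, so is_cur_manual() is true once the
	 * current autogain value is 0; error handling is omitted here.
	 */
	v4l2_ctrl_auto_cluster(2, s->gain_ctrls, 0, true);
}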
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
new file mode 100644
index 000000000000..e77f722b36a4
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework Request API implementation.
+ *
+ * Copyright (C) 2018-2021 Hans Verkuil <hverkuil@kernel.org>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Initialize the request-related fields in a control handler */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl)
+{
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_init(&hdl->req_obj);
+}
+
+/* Free the request-related fields in a control handler */
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ /*
+ * Do nothing if this isn't the main handler or the main
+ * handler is not used in any request.
+ *
+ * The main handler can be identified by having a NULL ops pointer in
+ * the request object.
+ */
+ if (hdl->req_obj.ops || list_empty(&hdl->requests))
+ return;
+
+ /*
+ * If the main handler is freed and it is used by handler objects in
+ * outstanding requests, then unbind and put those objects before
+ * freeing the main handler.
+ */
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+}
+
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->p_req_valid) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
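
These two lookups are typically paired in a driver's request validation path: find the request's control handler, check that a mandatory control was set, then drop the reference with v4l2_ctrl_request_hdl_put(). A sketch of such a helper follows; the function name and the choice of V4L2_CID_STATELESS_H264_SPS are illustrative, and main_hdl stands for the driver's own handler.

/*
 * Sketch only: a helper that a req_validate implementation might call
 * to verify that a queued request carries the H.264 SPS control.
 */
static int sketch_check_request(struct media_request *req,
				struct v4l2_ctrl_handler *main_hdl)
{
	struct v4l2_ctrl_handler *hdl;
	struct v4l2_ctrl *ctrl;

	hdl = v4l2_ctrl_request_hdl_find(req, main_hdl);
	if (!hdl)
		return -ENOENT;

	ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, V4L2_CID_STATELESS_H264_SPS);
	v4l2_ctrl_request_hdl_put(hdl);

	return ctrl ? 0 : -ENOENT;
}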
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret) {
+ mutex_lock(from->lock);
+ list_add_tail(&hdl->requests, &from->requests);
+ mutex_unlock(from->lock);
+ }
+ }
+ return ret;
+}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ /*
+ * If there are no controls in this completed request,
+ * then that can only happen if:
+ *
+ * 1) no controls were present in the queued request, and
+ * 2) v4l2_ctrl_request_complete() could not allocate a
+ * control handler object to store the completed state in.
+ *
+ * So return ENOMEM to indicate that there was an out-of-memory
+ * error.
+ */
+ if (!set)
+ return ERR_PTR(-ENOMEM);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(new_hdl);
+ kfree(new_hdl);
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ return ret;
+}
+
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev) {
+ dprintk(vdev, "%s: missing media device\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->request_fd < 0) {
+ dprintk(vdev, "%s: invalid request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return -EINVAL;
+ }
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(vdev, "%s: cannot find request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return PTR_ERR(req);
+ }
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ dprintk(vdev, "%s: cannot lock request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ dprintk(vdev,
+ "%s: cannot find request object for request fd %d\n",
+ video_device_node_name(vdev),
+ cs->request_fd);
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+
+ return ret;
+}
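
For reference, these two request-aware paths are reached from userspace by pointing the extended-control ioctls at a request: the application sets which to V4L2_CTRL_WHICH_REQUEST_VAL and fills request_fd with a descriptor previously allocated via MEDIA_IOC_REQUEST_ALLOC. A minimal userspace sketch under those assumptions; the request still has to be queued separately before the staged value is applied.

/* Userspace sketch, not part of this patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int stage_brightness(int video_fd, int request_fd, int value)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = V4L2_CID_BRIGHTNESS;
	ctrl.value = value;
	ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
	ctrls.request_fd = request_fd;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	/* Ends up in try_set_ext_ctrls_request() above. */
	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}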
+
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj) {
+ int ret;
+
+		/* Allocate a new handler object so the driver can return controls */
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return;
+
+ ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, hdl, main_hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+ return;
+ }
+ hdl->request_is_queued = true;
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->p_req_valid)
+ continue;
+
+ /* Copy the current control value into the request */
+ v4l2_ctrl_lock(ctrl);
+ cur_to_req(ref);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ mutex_lock(main_hdl->lock);
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
+
+int v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ if (!req || !main_hdl)
+ return 0;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return -EBUSY;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return 0;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return -EBUSY;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+ * Skip button controls and read-only controls.
+ */
+ if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->p_req_valid) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ ret = req_to_new(r);
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ goto error;
+ }
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ ret = try_or_set_cluster(NULL, master, true, 0);
+ v4l2_ctrl_unlock(master);
+
+ if (ret)
+ break;
+ }
+
+error:
+ media_request_object_put(obj);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
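
v4l2_ctrl_request_setup() and v4l2_ctrl_request_complete() are the two driver-facing entry points of this file: the first applies the control values staged in a request, the second copies the resulting state back so userspace can read it after the request completes. The following sketch shows the usual mem2mem pattern; my_ctx and its fields are hypothetical, while the req_obj.req access on the source buffer and the two framework calls follow the code above and the existing mem2mem helpers.

/*
 * Sketch only (needs <media/v4l2-ctrls.h>, <media/v4l2-mem2mem.h> and
 * <media/videobuf2-v4l2.h>); error handling is trimmed for brevity.
 */
struct my_ctx {				/* hypothetical per-instance state */
	struct v4l2_ctrl_handler hdl;
	struct v4l2_m2m_ctx *m2m_ctx;
};

static void sketch_device_run(struct my_ctx *ctx)
{
	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	struct media_request *req = src->vb2_buf.req_obj.req;

	/* Apply the control values staged in the request, if any. */
	if (req)
		v4l2_ctrl_request_setup(req, &ctx->hdl);

	/* ... program and kick the hardware ... */

	/* Hand the updated control state back to the request. */
	if (req)
		v4l2_ctrl_request_complete(req, &ctx->hdl);
}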
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
deleted file mode 100644
index fccd08b66d1a..000000000000
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ /dev/null
@@ -1,2928 +0,0 @@
-/*
- V4L2 controls framework implementation.
-
- Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-dev.h>
-
-#define has_op(master, op) \
- (master->ops && master->ops->op)
-#define call_op(master, op) \
- (has_op(master, op) ? master->ops->op(master) : 0)
-
-/* Internal temporary helper struct, one for each v4l2_ext_control */
-struct v4l2_ctrl_helper {
- /* Pointer to the control reference of the master control */
- struct v4l2_ctrl_ref *mref;
- /* The control corresponding to the v4l2_ext_control ID field. */
- struct v4l2_ctrl *ctrl;
- /* v4l2_ext_control index of the next control belonging to the
- same cluster, or 0 if there isn't any. */
- u32 next;
-};
-
-/* Small helper function to determine if the autocluster is set to manual
- mode. */
-static bool is_cur_manual(const struct v4l2_ctrl *master)
-{
- return master->is_auto && master->cur.val == master->manual_mode_value;
-}
-
-/* Same as above, but this checks the against the new value instead of the
- current value. */
-static bool is_new_manual(const struct v4l2_ctrl *master)
-{
- return master->is_auto && master->val == master->manual_mode_value;
-}
-
-/* Returns NULL or a character pointer array containing the menu for
- the given control ID. The pointer array ends with a NULL pointer.
- An empty string signifies a menu entry that is invalid. This allows
- drivers to disable certain options if it is not supported. */
-const char * const *v4l2_ctrl_get_menu(u32 id)
-{
- static const char * const mpeg_audio_sampling_freq[] = {
- "44.1 kHz",
- "48 kHz",
- "32 kHz",
- NULL
- };
- static const char * const mpeg_audio_encoding[] = {
- "MPEG-1/2 Layer I",
- "MPEG-1/2 Layer II",
- "MPEG-1/2 Layer III",
- "MPEG-2/4 AAC",
- "AC-3",
- NULL
- };
- static const char * const mpeg_audio_l1_bitrate[] = {
- "32 kbps",
- "64 kbps",
- "96 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "288 kbps",
- "320 kbps",
- "352 kbps",
- "384 kbps",
- "416 kbps",
- "448 kbps",
- NULL
- };
- static const char * const mpeg_audio_l2_bitrate[] = {
- "32 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- NULL
- };
- static const char * const mpeg_audio_l3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- NULL
- };
- static const char * const mpeg_audio_ac3_bitrate[] = {
- "32 kbps",
- "40 kbps",
- "48 kbps",
- "56 kbps",
- "64 kbps",
- "80 kbps",
- "96 kbps",
- "112 kbps",
- "128 kbps",
- "160 kbps",
- "192 kbps",
- "224 kbps",
- "256 kbps",
- "320 kbps",
- "384 kbps",
- "448 kbps",
- "512 kbps",
- "576 kbps",
- "640 kbps",
- NULL
- };
- static const char * const mpeg_audio_mode[] = {
- "Stereo",
- "Joint Stereo",
- "Dual",
- "Mono",
- NULL
- };
- static const char * const mpeg_audio_mode_extension[] = {
- "Bound 4",
- "Bound 8",
- "Bound 12",
- "Bound 16",
- NULL
- };
- static const char * const mpeg_audio_emphasis[] = {
- "No Emphasis",
- "50/15 us",
- "CCITT J17",
- NULL
- };
- static const char * const mpeg_audio_crc[] = {
- "No CRC",
- "16-bit CRC",
- NULL
- };
- static const char * const mpeg_audio_dec_playback[] = {
- "Auto",
- "Stereo",
- "Left",
- "Right",
- "Mono",
- "Swapped Stereo",
- NULL
- };
- static const char * const mpeg_video_encoding[] = {
- "MPEG-1",
- "MPEG-2",
- "MPEG-4 AVC",
- NULL
- };
- static const char * const mpeg_video_aspect[] = {
- "1x1",
- "4x3",
- "16x9",
- "2.21x1",
- NULL
- };
- static const char * const mpeg_video_bitrate_mode[] = {
- "Variable Bitrate",
- "Constant Bitrate",
- NULL
- };
- static const char * const mpeg_stream_type[] = {
- "MPEG-2 Program Stream",
- "MPEG-2 Transport Stream",
- "MPEG-1 System Stream",
- "MPEG-2 DVD-compatible Stream",
- "MPEG-1 VCD-compatible Stream",
- "MPEG-2 SVCD-compatible Stream",
- NULL
- };
- static const char * const mpeg_stream_vbi_fmt[] = {
- "No VBI",
- "Private Packet, IVTV Format",
- NULL
- };
- static const char * const camera_power_line_frequency[] = {
- "Disabled",
- "50 Hz",
- "60 Hz",
- "Auto",
- NULL
- };
- static const char * const camera_exposure_auto[] = {
- "Auto Mode",
- "Manual Mode",
- "Shutter Priority Mode",
- "Aperture Priority Mode",
- NULL
- };
- static const char * const camera_exposure_metering[] = {
- "Average",
- "Center Weighted",
- "Spot",
- "Matrix",
- NULL
- };
- static const char * const camera_auto_focus_range[] = {
- "Auto",
- "Normal",
- "Macro",
- "Infinity",
- NULL
- };
- static const char * const colorfx[] = {
- "None",
- "Black & White",
- "Sepia",
- "Negative",
- "Emboss",
- "Sketch",
- "Sky Blue",
- "Grass Green",
- "Skin Whiten",
- "Vivid",
- "Aqua",
- "Art Freeze",
- "Silhouette",
- "Solarization",
- "Antique",
- "Set Cb/Cr",
- NULL
- };
- static const char * const auto_n_preset_white_balance[] = {
- "Manual",
- "Auto",
- "Incandescent",
- "Fluorescent",
- "Fluorescent H",
- "Horizon",
- "Daylight",
- "Flash",
- "Cloudy",
- "Shade",
- NULL,
- };
- static const char * const camera_iso_sensitivity_auto[] = {
- "Manual",
- "Auto",
- NULL
- };
- static const char * const scene_mode[] = {
- "None",
- "Backlight",
- "Beach/Snow",
- "Candle Light",
- "Dusk/Dawn",
- "Fall Colors",
- "Fireworks",
- "Landscape",
- "Night",
- "Party/Indoor",
- "Portrait",
- "Sports",
- "Sunset",
- "Text",
- NULL
- };
- static const char * const tune_emphasis[] = {
- "None",
- "50 Microseconds",
- "75 Microseconds",
- NULL,
- };
- static const char * const header_mode[] = {
- "Separate Buffer",
- "Joined With 1st Frame",
- NULL,
- };
- static const char * const multi_slice[] = {
- "Single",
- "Max Macroblocks",
- "Max Bytes",
- NULL,
- };
- static const char * const entropy_mode[] = {
- "CAVLC",
- "CABAC",
- NULL,
- };
- static const char * const mpeg_h264_level[] = {
- "1",
- "1b",
- "1.1",
- "1.2",
- "1.3",
- "2",
- "2.1",
- "2.2",
- "3",
- "3.1",
- "3.2",
- "4",
- "4.1",
- "4.2",
- "5",
- "5.1",
- NULL,
- };
- static const char * const h264_loop_filter[] = {
- "Enabled",
- "Disabled",
- "Disabled at Slice Boundary",
- NULL,
- };
- static const char * const h264_profile[] = {
- "Baseline",
- "Constrained Baseline",
- "Main",
- "Extended",
- "High",
- "High 10",
- "High 422",
- "High 444 Predictive",
- "High 10 Intra",
- "High 422 Intra",
- "High 444 Intra",
- "CAVLC 444 Intra",
- "Scalable Baseline",
- "Scalable High",
- "Scalable High Intra",
- "Multiview High",
- NULL,
- };
- static const char * const vui_sar_idc[] = {
- "Unspecified",
- "1:1",
- "12:11",
- "10:11",
- "16:11",
- "40:33",
- "24:11",
- "20:11",
- "32:11",
- "80:33",
- "18:11",
- "15:11",
- "64:33",
- "160:99",
- "4:3",
- "3:2",
- "2:1",
- "Extended SAR",
- NULL,
- };
- static const char * const h264_fp_arrangement_type[] = {
- "Checkerboard",
- "Column",
- "Row",
- "Side by Side",
- "Top Bottom",
- "Temporal",
- NULL,
- };
- static const char * const h264_fmo_map_type[] = {
- "Interleaved Slices",
- "Scattered Slices",
- "Foreground with Leftover",
- "Box Out",
- "Raster Scan",
- "Wipe Scan",
- "Explicit",
- NULL,
- };
- static const char * const mpeg_mpeg4_level[] = {
- "0",
- "0b",
- "1",
- "2",
- "3",
- "3b",
- "4",
- "5",
- NULL,
- };
- static const char * const mpeg4_profile[] = {
- "Simple",
- "Advanced Simple",
- "Core",
- "Simple Scalable",
- "Advanced Coding Efficency",
- NULL,
- };
-
- static const char * const flash_led_mode[] = {
- "Off",
- "Flash",
- "Torch",
- NULL,
- };
- static const char * const flash_strobe_source[] = {
- "Software",
- "External",
- NULL,
- };
-
- static const char * const jpeg_chroma_subsampling[] = {
- "4:4:4",
- "4:2:2",
- "4:2:0",
- "4:1:1",
- "4:1:0",
- "Gray",
- NULL,
- };
- static const char * const dv_tx_mode[] = {
- "DVI-D",
- "HDMI",
- NULL,
- };
- static const char * const dv_rgb_range[] = {
- "Automatic",
- "RGB limited range (16-235)",
- "RGB full range (0-255)",
- NULL,
- };
-
-
- switch (id) {
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- return mpeg_audio_sampling_freq;
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- return mpeg_audio_encoding;
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- return mpeg_audio_l1_bitrate;
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- return mpeg_audio_l2_bitrate;
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- return mpeg_audio_l3_bitrate;
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- return mpeg_audio_ac3_bitrate;
- case V4L2_CID_MPEG_AUDIO_MODE:
- return mpeg_audio_mode;
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- return mpeg_audio_mode_extension;
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- return mpeg_audio_emphasis;
- case V4L2_CID_MPEG_AUDIO_CRC:
- return mpeg_audio_crc;
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
- return mpeg_audio_dec_playback;
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- return mpeg_video_encoding;
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- return mpeg_video_aspect;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- return mpeg_video_bitrate_mode;
- case V4L2_CID_MPEG_STREAM_TYPE:
- return mpeg_stream_type;
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- return mpeg_stream_vbi_fmt;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- return camera_power_line_frequency;
- case V4L2_CID_EXPOSURE_AUTO:
- return camera_exposure_auto;
- case V4L2_CID_EXPOSURE_METERING:
- return camera_exposure_metering;
- case V4L2_CID_AUTO_FOCUS_RANGE:
- return camera_auto_focus_range;
- case V4L2_CID_COLORFX:
- return colorfx;
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- return auto_n_preset_white_balance;
- case V4L2_CID_ISO_SENSITIVITY_AUTO:
- return camera_iso_sensitivity_auto;
- case V4L2_CID_SCENE_MODE:
- return scene_mode;
- case V4L2_CID_TUNE_PREEMPHASIS:
- return tune_emphasis;
- case V4L2_CID_TUNE_DEEMPHASIS:
- return tune_emphasis;
- case V4L2_CID_FLASH_LED_MODE:
- return flash_led_mode;
- case V4L2_CID_FLASH_STROBE_SOURCE:
- return flash_strobe_source;
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- return header_mode;
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
- return multi_slice;
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
- return entropy_mode;
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- return mpeg_h264_level;
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- return h264_loop_filter;
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- return h264_profile;
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
- return vui_sar_idc;
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
- return h264_fp_arrangement_type;
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
- return h264_fmo_map_type;
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- return mpeg_mpeg4_level;
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- return mpeg4_profile;
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
- return jpeg_chroma_subsampling;
- case V4L2_CID_DV_TX_MODE:
- return dv_tx_mode;
- case V4L2_CID_DV_TX_RGB_RANGE:
- case V4L2_CID_DV_RX_RGB_RANGE:
- return dv_rgb_range;
-
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_menu);
-
-/* Return the control name. */
-const char *v4l2_ctrl_get_name(u32 id)
-{
- switch (id) {
- /* USER controls */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_USER_CLASS: return "User Controls";
- case V4L2_CID_BRIGHTNESS: return "Brightness";
- case V4L2_CID_CONTRAST: return "Contrast";
- case V4L2_CID_SATURATION: return "Saturation";
- case V4L2_CID_HUE: return "Hue";
- case V4L2_CID_AUDIO_VOLUME: return "Volume";
- case V4L2_CID_AUDIO_BALANCE: return "Balance";
- case V4L2_CID_AUDIO_BASS: return "Bass";
- case V4L2_CID_AUDIO_TREBLE: return "Treble";
- case V4L2_CID_AUDIO_MUTE: return "Mute";
- case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
- case V4L2_CID_BLACK_LEVEL: return "Black Level";
- case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
- case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
- case V4L2_CID_RED_BALANCE: return "Red Balance";
- case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
- case V4L2_CID_GAMMA: return "Gamma";
- case V4L2_CID_EXPOSURE: return "Exposure";
- case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
- case V4L2_CID_GAIN: return "Gain";
- case V4L2_CID_HFLIP: return "Horizontal Flip";
- case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
- case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
- case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
- case V4L2_CID_SHARPNESS: return "Sharpness";
- case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
- case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
- case V4L2_CID_COLOR_KILLER: return "Color Killer";
- case V4L2_CID_COLORFX: return "Color Effects";
- case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
- case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
- case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background Color";
- case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
- case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
- case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
- case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
- case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
-
- /* MPEG controls */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
- case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
- case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
- case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
- case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
- case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
- case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
- case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
- case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
- case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
- case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
- case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
- case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
- case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
- case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
- case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
- case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
- case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
- case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
- case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
- case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
- case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
- case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
- case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
- case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
- case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
- case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
- case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
- case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
- case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
- case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
- case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
- case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
- case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
- case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
- return "H264 Set QP Value for HC Layers";
- case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
- case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
- case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
- case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
- case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
- case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
- case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
-
- /* CAMERA controls */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
- case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
- case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
- case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
- case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
- case V4L2_CID_PAN_RESET: return "Pan, Reset";
- case V4L2_CID_TILT_RESET: return "Tilt, Reset";
- case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
- case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
- case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
- case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
- case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
- case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
- case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
- case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
- case V4L2_CID_PRIVACY: return "Privacy";
- case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
- case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
- case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
- case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
- case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
- case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
- case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
- case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
- case V4L2_CID_SCENE_MODE: return "Scene Mode";
- case V4L2_CID_3A_LOCK: return "3A Lock";
- case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
- case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
- case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
- case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
-
- /* FM Radio Modulator control */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
- case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
- case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
- case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
- case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
- case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
- case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
- case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
- case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
- case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
- case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
- case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
- case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
-
- /* Flash controls */
- case V4L2_CID_FLASH_CLASS: return "Flash Controls";
- case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
- case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
- case V4L2_CID_FLASH_STROBE: return "Strobe";
- case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
- case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
- case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
- case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
- case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
- case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
- case V4L2_CID_FLASH_FAULT: return "Faults";
- case V4L2_CID_FLASH_CHARGE: return "Charge";
- case V4L2_CID_FLASH_READY: return "Ready to Strobe";
-
- /* JPEG encoder controls */
- /* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
- case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
- case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
- case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
-
- /* Image source controls */
- case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
- case V4L2_CID_VBLANK: return "Vertical Blanking";
- case V4L2_CID_HBLANK: return "Horizontal Blanking";
- case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
-
- /* Image processing controls */
- case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
- case V4L2_CID_LINK_FREQ: return "Link Frequency";
- case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
- case V4L2_CID_TEST_PATTERN: return "Test Pattern";
-
- /* DV controls */
- case V4L2_CID_DV_CLASS: return "Digital Video Controls";
- case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
- case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
- case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
- case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
- case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
- case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
- case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
-
- case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
- case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
- case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
- default:
- return NULL;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_get_name);
-
-void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
- s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags)
-{
- *name = v4l2_ctrl_get_name(id);
- *flags = 0;
-
- switch (id) {
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_LOUDNESS:
- case V4L2_CID_AUTO_WHITE_BALANCE:
- case V4L2_CID_AUTOGAIN:
- case V4L2_CID_HFLIP:
- case V4L2_CID_VFLIP:
- case V4L2_CID_HUE_AUTO:
- case V4L2_CID_CHROMA_AGC:
- case V4L2_CID_COLOR_KILLER:
- case V4L2_CID_AUTOBRIGHTNESS:
- case V4L2_CID_MPEG_AUDIO_MUTE:
- case V4L2_CID_MPEG_VIDEO_MUTE:
- case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
- case V4L2_CID_MPEG_VIDEO_PULLDOWN:
- case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
- case V4L2_CID_FOCUS_AUTO:
- case V4L2_CID_PRIVACY:
- case V4L2_CID_AUDIO_LIMITER_ENABLED:
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
- case V4L2_CID_PILOT_TONE_ENABLED:
- case V4L2_CID_ILLUMINATORS_1:
- case V4L2_CID_ILLUMINATORS_2:
- case V4L2_CID_FLASH_STROBE_STATUS:
- case V4L2_CID_FLASH_CHARGE:
- case V4L2_CID_FLASH_READY:
- case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
- case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
- case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
- case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
- case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
- case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
- case V4L2_CID_WIDE_DYNAMIC_RANGE:
- case V4L2_CID_IMAGE_STABILIZATION:
- case V4L2_CID_RDS_RECEPTION:
- *type = V4L2_CTRL_TYPE_BOOLEAN;
- *min = 0;
- *max = *step = 1;
- break;
- case V4L2_CID_PAN_RESET:
- case V4L2_CID_TILT_RESET:
- case V4L2_CID_FLASH_STROBE:
- case V4L2_CID_FLASH_STROBE_STOP:
- case V4L2_CID_AUTO_FOCUS_START:
- case V4L2_CID_AUTO_FOCUS_STOP:
- *type = V4L2_CTRL_TYPE_BUTTON;
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- *min = *max = *step = *def = 0;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
- case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
- case V4L2_CID_MPEG_AUDIO_EMPHASIS:
- case V4L2_CID_MPEG_AUDIO_CRC:
- case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
- case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
- case V4L2_CID_MPEG_VIDEO_ENCODING:
- case V4L2_CID_MPEG_VIDEO_ASPECT:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_STREAM_TYPE:
- case V4L2_CID_MPEG_STREAM_VBI_FMT:
- case V4L2_CID_EXPOSURE_AUTO:
- case V4L2_CID_AUTO_FOCUS_RANGE:
- case V4L2_CID_COLORFX:
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- case V4L2_CID_TUNE_PREEMPHASIS:
- case V4L2_CID_FLASH_LED_MODE:
- case V4L2_CID_FLASH_STROBE_SOURCE:
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
- case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
- case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
- case V4L2_CID_ISO_SENSITIVITY_AUTO:
- case V4L2_CID_EXPOSURE_METERING:
- case V4L2_CID_SCENE_MODE:
- case V4L2_CID_DV_TX_MODE:
- case V4L2_CID_DV_TX_RGB_RANGE:
- case V4L2_CID_DV_RX_RGB_RANGE:
- case V4L2_CID_TEST_PATTERN:
- case V4L2_CID_TUNE_DEEMPHASIS:
- *type = V4L2_CTRL_TYPE_MENU;
- break;
- case V4L2_CID_LINK_FREQ:
- *type = V4L2_CTRL_TYPE_INTEGER_MENU;
- break;
- case V4L2_CID_RDS_TX_PS_NAME:
- case V4L2_CID_RDS_TX_RADIO_TEXT:
- *type = V4L2_CTRL_TYPE_STRING;
- break;
- case V4L2_CID_ISO_SENSITIVITY:
- case V4L2_CID_AUTO_EXPOSURE_BIAS:
- *type = V4L2_CTRL_TYPE_INTEGER_MENU;
- break;
- case V4L2_CID_USER_CLASS:
- case V4L2_CID_CAMERA_CLASS:
- case V4L2_CID_MPEG_CLASS:
- case V4L2_CID_FM_TX_CLASS:
- case V4L2_CID_FLASH_CLASS:
- case V4L2_CID_JPEG_CLASS:
- case V4L2_CID_IMAGE_SOURCE_CLASS:
- case V4L2_CID_IMAGE_PROC_CLASS:
- case V4L2_CID_DV_CLASS:
- case V4L2_CID_FM_RX_CLASS:
- *type = V4L2_CTRL_TYPE_CTRL_CLASS;
- /* You can neither read nor write these */
- *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
- *min = *max = *step = *def = 0;
- break;
- case V4L2_CID_BG_COLOR:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *step = 1;
- *min = 0;
- /* Max is the full RGB888 range, i.e. 2^24 - 1 */
- *max = 0xFFFFFF;
- break;
- case V4L2_CID_FLASH_FAULT:
- case V4L2_CID_JPEG_ACTIVE_MARKER:
- case V4L2_CID_3A_LOCK:
- case V4L2_CID_AUTO_FOCUS_STATUS:
- case V4L2_CID_DV_TX_HOTPLUG:
- case V4L2_CID_DV_TX_RXSENSE:
- case V4L2_CID_DV_TX_EDID_PRESENT:
- case V4L2_CID_DV_RX_POWER_PRESENT:
- *type = V4L2_CTRL_TYPE_BITMASK;
- break;
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
- *type = V4L2_CTRL_TYPE_INTEGER;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
- case V4L2_CID_MPEG_VIDEO_DEC_PTS:
- *flags |= V4L2_CTRL_FLAG_VOLATILE;
- /* Fall through */
- case V4L2_CID_PIXEL_RATE:
- *type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- *min = *max = *step = *def = 0;
- break;
- default:
- *type = V4L2_CTRL_TYPE_INTEGER;
- break;
- }
- switch (id) {
- case V4L2_CID_MPEG_AUDIO_ENCODING:
- case V4L2_CID_MPEG_AUDIO_MODE:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- case V4L2_CID_MPEG_STREAM_TYPE:
- *flags |= V4L2_CTRL_FLAG_UPDATE;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_BRIGHTNESS:
- case V4L2_CID_CONTRAST:
- case V4L2_CID_SATURATION:
- case V4L2_CID_HUE:
- case V4L2_CID_RED_BALANCE:
- case V4L2_CID_BLUE_BALANCE:
- case V4L2_CID_GAMMA:
- case V4L2_CID_SHARPNESS:
- case V4L2_CID_CHROMA_GAIN:
- case V4L2_CID_RDS_TX_DEVIATION:
- case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
- case V4L2_CID_AUDIO_LIMITER_DEVIATION:
- case V4L2_CID_AUDIO_COMPRESSION_GAIN:
- case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
- case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
- case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
- case V4L2_CID_PILOT_TONE_DEVIATION:
- case V4L2_CID_PILOT_TONE_FREQUENCY:
- case V4L2_CID_TUNE_POWER_LEVEL:
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- *flags |= V4L2_CTRL_FLAG_SLIDER;
- break;
- case V4L2_CID_PAN_RELATIVE:
- case V4L2_CID_TILT_RELATIVE:
- case V4L2_CID_FOCUS_RELATIVE:
- case V4L2_CID_IRIS_RELATIVE:
- case V4L2_CID_ZOOM_RELATIVE:
- *flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- break;
- case V4L2_CID_FLASH_STROBE_STATUS:
- case V4L2_CID_AUTO_FOCUS_STATUS:
- case V4L2_CID_FLASH_READY:
- case V4L2_CID_DV_TX_HOTPLUG:
- case V4L2_CID_DV_TX_RXSENSE:
- case V4L2_CID_DV_TX_EDID_PRESENT:
- case V4L2_CID_DV_RX_POWER_PRESENT:
- *flags |= V4L2_CTRL_FLAG_READ_ONLY;
- break;
- }
-}
-EXPORT_SYMBOL(v4l2_ctrl_fill);
-
-/* Helper function to determine whether the control type is compatible with
- VIDIOC_G/S_CTRL. */
-static bool type_is_int(const struct v4l2_ctrl *ctrl)
-{
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER64:
- case V4L2_CTRL_TYPE_STRING:
- /* Nope, these need v4l2_ext_control */
- return false;
- default:
- return true;
- }
-}
-
-static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
-{
- memset(ev->reserved, 0, sizeof(ev->reserved));
- ev->type = V4L2_EVENT_CTRL;
- ev->id = ctrl->id;
- ev->u.ctrl.changes = changes;
- ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = ctrl->flags;
- if (ctrl->type == V4L2_CTRL_TYPE_STRING)
- ev->u.ctrl.value64 = 0;
- else
- ev->u.ctrl.value64 = ctrl->cur.val64;
- ev->u.ctrl.minimum = ctrl->minimum;
- ev->u.ctrl.maximum = ctrl->maximum;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU
- || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
- ev->u.ctrl.step = 1;
- else
- ev->u.ctrl.step = ctrl->step;
- ev->u.ctrl.default_value = ctrl->default_value;
-}
-
-static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
-{
- struct v4l2_event ev;
- struct v4l2_subscribed_event *sev;
-
- if (list_empty(&ctrl->ev_subs))
- return;
- fill_event(&ev, ctrl, changes);
-
- list_for_each_entry(sev, &ctrl->ev_subs, node)
- if (sev->fh != fh ||
- (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
- v4l2_event_queue_fh(sev->fh, &ev);
-}
-
-/* Helper function: copy the current control value back to the caller */
-static int cur_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- u32 len;
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_STRING:
- len = strlen(ctrl->cur.string);
- if (c->size < len + 1) {
- c->size = len + 1;
- return -ENOSPC;
- }
- return copy_to_user(c->string, ctrl->cur.string,
- len + 1) ? -EFAULT : 0;
- case V4L2_CTRL_TYPE_INTEGER64:
- c->value64 = ctrl->cur.val64;
- break;
- default:
- c->value = ctrl->cur.val;
- break;
- }
- return 0;
-}
-
-/* Helper function: copy the caller-provided value as the new control value */
-static int user_to_new(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- int ret;
- u32 size;
-
- ctrl->is_new = 1;
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER64:
- ctrl->val64 = c->value64;
- break;
- case V4L2_CTRL_TYPE_STRING:
- size = c->size;
- if (size == 0)
- return -ERANGE;
- if (size > ctrl->maximum + 1)
- size = ctrl->maximum + 1;
- ret = copy_from_user(ctrl->string, c->string, size);
- if (!ret) {
- char last = ctrl->string[size - 1];
-
- ctrl->string[size - 1] = 0;
- /* If the string was longer than ctrl->maximum,
- then return an error. */
- if (strlen(ctrl->string) == ctrl->maximum && last)
- return -ERANGE;
- }
- return ret ? -EFAULT : 0;
- default:
- ctrl->val = c->value;
- break;
- }
- return 0;
-}
-
-/* Helper function: copy the new control value back to the caller */
-static int new_to_user(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl)
-{
- u32 len;
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_STRING:
- len = strlen(ctrl->string);
- if (c->size < len + 1) {
- c->size = ctrl->maximum + 1;
- return -ENOSPC;
- }
- return copy_to_user(c->string, ctrl->string,
- len + 1) ? -EFAULT : 0;
- case V4L2_CTRL_TYPE_INTEGER64:
- c->value64 = ctrl->val64;
- break;
- default:
- c->value = ctrl->val;
- break;
- }
- return 0;
-}
-
-/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
-{
- bool changed = false;
-
- if (ctrl == NULL)
- return;
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BUTTON:
- changed = true;
- break;
- case V4L2_CTRL_TYPE_STRING:
- /* strings are always 0-terminated */
- changed = strcmp(ctrl->string, ctrl->cur.string);
- strcpy(ctrl->cur.string, ctrl->string);
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- changed = ctrl->val64 != ctrl->cur.val64;
- ctrl->cur.val64 = ctrl->val64;
- break;
- default:
- changed = ctrl->val != ctrl->cur.val;
- ctrl->cur.val = ctrl->val;
- break;
- }
- if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
- /* Note: CH_FLAGS is only set for auto clusters. */
- ctrl->flags &=
- ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
- if (!is_cur_manual(ctrl->cluster[0])) {
- ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
- if (ctrl->cluster[0]->has_volatiles)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- }
- fh = NULL;
- }
- if (changed || ch_flags) {
- /* If a control was changed that was not one of the controls
- modified by the application, then send the event to all. */
- if (!ctrl->is_new)
- fh = NULL;
- send_event(fh, ctrl,
- (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
- if (ctrl->call_notify && changed && ctrl->handler->notify)
- ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
- }
-}
-
-/* Copy the current value to the new value */
-static void cur_to_new(struct v4l2_ctrl *ctrl)
-{
- if (ctrl == NULL)
- return;
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_STRING:
- /* strings are always 0-terminated */
- strcpy(ctrl->string, ctrl->cur.string);
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- ctrl->val64 = ctrl->cur.val64;
- break;
- default:
- ctrl->val = ctrl->cur.val;
- break;
- }
-}
-
-/* Return non-zero if one or more of the controls in the cluster has a new
- value that differs from the current value. */
-static int cluster_changed(struct v4l2_ctrl *master)
-{
- int diff = 0;
- int i;
-
- for (i = 0; !diff && i < master->ncontrols; i++) {
- struct v4l2_ctrl *ctrl = master->cluster[i];
-
- if (ctrl == NULL)
- continue;
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BUTTON:
- /* Button controls are always 'different' */
- return 1;
- case V4L2_CTRL_TYPE_STRING:
- /* strings are always 0-terminated */
- diff = strcmp(ctrl->string, ctrl->cur.string);
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- diff = ctrl->val64 != ctrl->cur.val64;
- break;
- default:
- diff = ctrl->val != ctrl->cur.val;
- break;
- }
- }
- return diff;
-}
-
-/* Control range checking */
-static int check_range(enum v4l2_ctrl_type type,
- s32 min, s32 max, u32 step, s32 def)
-{
- switch (type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- if (step != 1 || max > 1 || min < 0)
- return -ERANGE;
- /* fall through */
- case V4L2_CTRL_TYPE_INTEGER:
- if (step <= 0 || min > max || def < min || def > max)
- return -ERANGE;
- return 0;
- case V4L2_CTRL_TYPE_BITMASK:
- if (step || min || !max || (def & ~max))
- return -ERANGE;
- return 0;
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (min > max || def < min || def > max)
- return -ERANGE;
- /* Note: step == menu_skip_mask for menu controls.
- So here we check if the default value is masked out. */
- if (step && ((1 << def) & step))
- return -EINVAL;
- return 0;
- case V4L2_CTRL_TYPE_STRING:
- if (min > max || min < 0 || step < 1 || def)
- return -ERANGE;
- return 0;
- default:
- return 0;
- }
-}
-
-/* Validate a new control */
-static int validate_new(const struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
-{
- size_t len;
- u32 offset;
- s32 val;
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- /* Round towards the closest legal value */
- val = c->value + ctrl->step / 2;
- val = clamp(val, ctrl->minimum, ctrl->maximum);
- offset = val - ctrl->minimum;
- offset = ctrl->step * (offset / ctrl->step);
- c->value = ctrl->minimum + offset;
- return 0;
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- c->value = !!c->value;
- return 0;
-
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (c->value < ctrl->minimum || c->value > ctrl->maximum)
- return -ERANGE;
- if (ctrl->menu_skip_mask & (1 << c->value))
- return -EINVAL;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
- ctrl->qmenu[c->value][0] == '\0')
- return -EINVAL;
- return 0;
-
- case V4L2_CTRL_TYPE_BITMASK:
- c->value &= ctrl->maximum;
- return 0;
-
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_CTRL_CLASS:
- c->value = 0;
- return 0;
-
- case V4L2_CTRL_TYPE_INTEGER64:
- return 0;
-
- case V4L2_CTRL_TYPE_STRING:
- len = strlen(c->string);
- if (len < ctrl->minimum)
- return -ERANGE;
- if ((len - ctrl->minimum) % ctrl->step)
- return -ERANGE;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static inline u32 node2id(struct list_head *node)
-{
- return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
-}
-
-/* Set the handler's error code if it wasn't set earlier already */
-static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
-{
- if (hdl->error == 0)
- hdl->error = err;
- return err;
-}
-
-/* Initialize the handler */
-int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
- unsigned nr_of_controls_hint,
- struct lock_class_key *key, const char *name)
-{
- hdl->lock = &hdl->_lock;
- mutex_init(hdl->lock);
- lockdep_set_class_and_name(hdl->lock, key, name);
- INIT_LIST_HEAD(&hdl->ctrls);
- INIT_LIST_HEAD(&hdl->ctrl_refs);
- hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
- hdl->buckets = kcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
- GFP_KERNEL);
- hdl->error = hdl->buckets ? 0 : -ENOMEM;
- return hdl->error;
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
-
-/* Free all controls and control refs */
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
-{
- struct v4l2_ctrl_ref *ref, *next_ref;
- struct v4l2_ctrl *ctrl, *next_ctrl;
- struct v4l2_subscribed_event *sev, *next_sev;
-
- if (hdl == NULL || hdl->buckets == NULL)
- return;
-
- mutex_lock(hdl->lock);
- /* Free all nodes */
- list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
- list_del(&ref->node);
- kfree(ref);
- }
- /* Free all controls owned by the handler */
- list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
- list_del(&ctrl->node);
- list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
- list_del(&sev->node);
- kfree(ctrl);
- }
- kfree(hdl->buckets);
- hdl->buckets = NULL;
- hdl->cached = NULL;
- hdl->error = 0;
- mutex_unlock(hdl->lock);
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_free);
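As an editorial aside, the init/free pair above is the usual driver entry point for the control framework. The following is a minimal sketch of the expected lifecycle, assuming a hypothetical control ops structure and probe helper (none of this is part of the file being diffed); the v4l2_ctrl_handler_init() macro shown wraps v4l2_ctrl_handler_init_class() with a lockdep key.

	#include <media/v4l2-ctrls.h>

	static int my_probe_ctrls(struct v4l2_ctrl_handler *hdl,
				  const struct v4l2_ctrl_ops *ops)
	{
		/* The hint of 2 only sizes the hash buckets; it is not a hard limit. */
		v4l2_ctrl_handler_init(hdl, 2);
		v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
		v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, 0, 127, 1, 64);
		if (hdl->error) {
			int err = hdl->error;

			/* Frees the refs of whatever controls were added so far. */
			v4l2_ctrl_handler_free(hdl);
			return err;
		}
		return 0;
	}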
-
-/* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
- be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
- with applications that do not use the NEXT_CTRL flag.
-
- We just find the n-th private user control. It's O(N), but that should not
- be an issue in this particular case. */
-static struct v4l2_ctrl_ref *find_private_ref(
- struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref;
-
- id -= V4L2_CID_PRIVATE_BASE;
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- /* Search for private user controls that are compatible with
- VIDIOC_G/S_CTRL. */
- if (V4L2_CTRL_ID2CLASS(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
- V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
- if (!type_is_int(ref->ctrl))
- continue;
- if (id == 0)
- return ref;
- id--;
- }
- }
- return NULL;
-}
-
-/* Find a control with the given ID. */
-static struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref;
- int bucket;
-
- id &= V4L2_CTRL_ID_MASK;
-
- /* Old-style private controls need special handling */
- if (id >= V4L2_CID_PRIVATE_BASE)
- return find_private_ref(hdl, id);
- bucket = id % hdl->nr_of_buckets;
-
- /* Simple optimization: cache the last control found */
- if (hdl->cached && hdl->cached->ctrl->id == id)
- return hdl->cached;
-
- /* Not in cache, search the hash */
- ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
- while (ref && ref->ctrl->id != id)
- ref = ref->next;
-
- if (ref)
- hdl->cached = ref; /* cache it! */
- return ref;
-}
-
-/* Find a control with the given ID. Take the handler's lock first. */
-static struct v4l2_ctrl_ref *find_ref_lock(
- struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref = NULL;
-
- if (hdl) {
- mutex_lock(hdl->lock);
- ref = find_ref(hdl, id);
- mutex_unlock(hdl->lock);
- }
- return ref;
-}
-
-/* Find a control with the given ID. */
-struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
-{
- struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
-
- return ref ? ref->ctrl : NULL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_find);
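The lookup above is how a driver retrieves another handler's control by ID; a small hedged sketch, where the 'sensor' wrapper and its subdev field are invented for illustration:

	struct v4l2_ctrl *exposure;

	exposure = v4l2_ctrl_find(sensor->sd.ctrl_handler, V4L2_CID_EXPOSURE);
	if (exposure)
		v4l2_ctrl_s_ctrl(exposure, 100);	/* helper defined further down */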
-
-/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
-static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
-{
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl_ref *new_ref;
- u32 id = ctrl->id;
- u32 class_ctrl = V4L2_CTRL_ID2CLASS(id) | 1;
- int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
-
- /* Automatically add the control class if it is not yet present. */
- if (id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
- if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
- return hdl->error;
-
- if (hdl->error)
- return hdl->error;
-
- new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
- if (!new_ref)
- return handler_set_err(hdl, -ENOMEM);
- new_ref->ctrl = ctrl;
- if (ctrl->handler == hdl) {
- /* By default each control starts in a cluster of its own.
- new_ref->ctrl is basically a cluster array with one
- element, so that's perfect to use as the cluster pointer.
- But only do this for the handler that owns the control. */
- ctrl->cluster = &new_ref->ctrl;
- ctrl->ncontrols = 1;
- }
-
- INIT_LIST_HEAD(&new_ref->node);
-
- mutex_lock(hdl->lock);
-
- /* Add immediately at the end of the list if the list is empty, or if
- the last element in the list has a lower ID.
- This ensures that when elements are added in ascending order the
- insertion is an O(1) operation. */
- if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
- list_add_tail(&new_ref->node, &hdl->ctrl_refs);
- goto insert_in_hash;
- }
-
- /* Find insert position in sorted list */
- list_for_each_entry(ref, &hdl->ctrl_refs, node) {
- if (ref->ctrl->id < id)
- continue;
- /* Don't add duplicates */
- if (ref->ctrl->id == id) {
- kfree(new_ref);
- goto unlock;
- }
- list_add(&new_ref->node, ref->node.prev);
- break;
- }
-
-insert_in_hash:
- /* Insert the control node in the hash */
- new_ref->next = hdl->buckets[bucket];
- hdl->buckets[bucket] = new_ref;
-
-unlock:
- mutex_unlock(hdl->lock);
- return 0;
-}
-
-/* Add a new control */
-static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, const char *name, enum v4l2_ctrl_type type,
- s32 min, s32 max, u32 step, s32 def,
- u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, void *priv)
-{
- struct v4l2_ctrl *ctrl;
- unsigned sz_extra = 0;
- int err;
-
- if (hdl->error)
- return NULL;
-
- /* Sanity checks */
- if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
- (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
- err = check_range(type, min, max, step, def);
- if (err) {
- handler_set_err(hdl, err);
- return NULL;
- }
- if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
-
- if (type == V4L2_CTRL_TYPE_BUTTON)
- flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
- flags |= V4L2_CTRL_FLAG_READ_ONLY;
- else if (type == V4L2_CTRL_TYPE_STRING)
- sz_extra += 2 * (max + 1);
-
- ctrl = kzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
- if (ctrl == NULL) {
- handler_set_err(hdl, -ENOMEM);
- return NULL;
- }
-
- INIT_LIST_HEAD(&ctrl->node);
- INIT_LIST_HEAD(&ctrl->ev_subs);
- ctrl->handler = hdl;
- ctrl->ops = ops;
- ctrl->id = id;
- ctrl->name = name;
- ctrl->type = type;
- ctrl->flags = flags;
- ctrl->minimum = min;
- ctrl->maximum = max;
- ctrl->step = step;
- if (type == V4L2_CTRL_TYPE_MENU)
- ctrl->qmenu = qmenu;
- else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
- ctrl->qmenu_int = qmenu_int;
- ctrl->priv = priv;
- ctrl->cur.val = ctrl->val = ctrl->default_value = def;
-
- if (ctrl->type == V4L2_CTRL_TYPE_STRING) {
- ctrl->cur.string = (char *)&ctrl[1] + sz_extra - (max + 1);
- ctrl->string = (char *)&ctrl[1] + sz_extra - 2 * (max + 1);
- if (ctrl->minimum)
- memset(ctrl->cur.string, ' ', ctrl->minimum);
- }
- if (handler_new_ref(hdl, ctrl)) {
- kfree(ctrl);
- return NULL;
- }
- mutex_lock(hdl->lock);
- list_add_tail(&ctrl->node, &hdl->ctrls);
- mutex_unlock(hdl->lock);
- return ctrl;
-}
-
-struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_config *cfg, void *priv)
-{
- bool is_menu;
- struct v4l2_ctrl *ctrl;
- const char *name = cfg->name;
- const char * const *qmenu = cfg->qmenu;
- const s64 *qmenu_int = cfg->qmenu_int;
- enum v4l2_ctrl_type type = cfg->type;
- u32 flags = cfg->flags;
- s32 min = cfg->min;
- s32 max = cfg->max;
- u32 step = cfg->step;
- s32 def = cfg->def;
-
- if (name == NULL)
- v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
- &def, &flags);
-
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
- if (is_menu)
- WARN_ON(step);
- else
- WARN_ON(cfg->menu_skip_mask);
- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
- qmenu = v4l2_ctrl_get_menu(cfg->id);
- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
- qmenu_int == NULL) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
-
- ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->id, name,
- type, min, max,
- is_menu ? cfg->menu_skip_mask : step,
- def, flags, qmenu, qmenu_int, priv);
- if (ctrl)
- ctrl->is_private = cfg->is_private;
- return ctrl;
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_custom);
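For reference, a hedged sketch of how a driver typically feeds v4l2_ctrl_new_custom(); the ops pointer, control name, handler and the ID offset are made up for illustration only:

	static const struct v4l2_ctrl_config my_custom_cfg = {
		.ops	= &my_ctrl_ops,			/* hypothetical ops */
		.id	= V4L2_CID_USER_BASE + 0x1000,	/* example driver-private ID */
		.name	= "Example Gain Boost",
		.type	= V4L2_CTRL_TYPE_INTEGER,
		.min	= 0,
		.max	= 15,
		.step	= 1,
		.def	= 0,
	};

	/* In probe: on failure hdl.error is set and NULL is returned. */
	struct v4l2_ctrl *boost = v4l2_ctrl_new_custom(&hdl, &my_custom_cfg, NULL);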
-
-/* Helper function for standard non-menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, s32 min, s32 max, u32 step, s32 def)
-{
- const char *name;
- enum v4l2_ctrl_type type;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type == V4L2_CTRL_TYPE_MENU
- || type == V4L2_CTRL_TYPE_INTEGER_MENU) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, id, name, type,
- min, max, step, def, flags, NULL, NULL, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std);
-
-/* Helper function for standard menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, s32 max, s32 mask, s32 def)
-{
- const char * const *qmenu = v4l2_ctrl_get_menu(id);
- const char *name;
- enum v4l2_ctrl_type type;
- s32 min;
- s32 step;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_MENU) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, mask, def, flags, qmenu, NULL, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
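A typical call to the standard-menu helper above, with no menu items masked out (mask == 0); the handler and ops names are placeholders:

	v4l2_ctrl_new_std_menu(&hdl, &my_ctrl_ops,
			       V4L2_CID_POWER_LINE_FREQUENCY,
			       V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
			       V4L2_CID_POWER_LINE_FREQUENCY_50HZ);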
-
-/* Helper function for standard menu controls with driver defined menu */
-struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
- s32 mask, s32 def, const char * const *qmenu)
-{
- enum v4l2_ctrl_type type;
- const char *name;
- u32 flags;
- s32 step;
- s32 min;
-
- /* v4l2_ctrl_new_std_menu_items() should only be called for
- * standard controls without a standard menu.
- */
- if (v4l2_ctrl_get_menu(id)) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, id, name, type, 0, max, mask, def,
- flags, qmenu, NULL, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
-
-/* Helper function for standard integer menu controls */
-struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops,
- u32 id, s32 max, s32 def, const s64 *qmenu_int)
-{
- const char *name;
- enum v4l2_ctrl_type type;
- s32 min;
- s32 step;
- u32 flags;
-
- v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, 0, def, flags, NULL, qmenu_int, NULL);
-}
-EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
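For the integer-menu helper above, a common pattern is a sensor driver exposing V4L2_CID_LINK_FREQ; the frequency table below is only an example and the handler/ops names are assumed:

	static const s64 link_freq_menu[] = {
		297000000,	/* example link frequencies in Hz */
		148500000,
	};

	v4l2_ctrl_new_int_menu(&hdl, &my_ctrl_ops, V4L2_CID_LINK_FREQ,
			       ARRAY_SIZE(link_freq_menu) - 1, 0, link_freq_menu);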
-
-/* Add a control from another handler to this handler */
-struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
-{
- if (hdl == NULL || hdl->error)
- return NULL;
- if (ctrl == NULL) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- if (ctrl->handler == hdl)
- return ctrl;
- return handler_new_ref(hdl, ctrl) ? NULL : ctrl;
-}
-EXPORT_SYMBOL(v4l2_ctrl_add_ctrl);
-
-/* Add the controls from another handler to our own. */
-int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl_handler *add,
- bool (*filter)(const struct v4l2_ctrl *ctrl))
-{
- struct v4l2_ctrl_ref *ref;
- int ret = 0;
-
- /* Do nothing if either handler is NULL or if they are the same */
- if (!hdl || !add || hdl == add)
- return 0;
- if (hdl->error)
- return hdl->error;
- mutex_lock(add->lock);
- list_for_each_entry(ref, &add->ctrl_refs, node) {
- struct v4l2_ctrl *ctrl = ref->ctrl;
-
- /* Skip handler-private controls. */
- if (ctrl->is_private)
- continue;
- /* And control classes */
- if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- continue;
- /* Filter any unwanted controls */
- if (filter && !filter(ctrl))
- continue;
- ret = handler_new_ref(hdl, ctrl);
- if (ret)
- break;
- }
- mutex_unlock(add->lock);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_add_handler);
-
-bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
-{
- if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
- return true;
- if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
- return true;
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- case V4L2_CID_AUDIO_VOLUME:
- case V4L2_CID_AUDIO_BALANCE:
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- case V4L2_CID_AUDIO_LOUDNESS:
- return true;
- default:
- break;
- }
- return false;
-}
-EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
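Bridge drivers combine the two helpers above to pull a subdevice's controls into their own handler; a sketch where 'radio_hdl' and 'sd' are hypothetical names for the bridge handler and the tuner subdevice:

	/* Inherit only the radio-relevant controls from the tuner subdevice. */
	int err = v4l2_ctrl_add_handler(&radio_hdl, sd->ctrl_handler,
					v4l2_ctrl_radio_filter);
	if (err)
		return err;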
-
-/* Cluster controls */
-void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
-{
- bool has_volatiles = false;
- int i;
-
- /* The first control is the master control and it must not be NULL */
- BUG_ON(ncontrols == 0 || controls[0] == NULL);
-
- for (i = 0; i < ncontrols; i++) {
- if (controls[i]) {
- controls[i]->cluster = controls;
- controls[i]->ncontrols = ncontrols;
- if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
- has_volatiles = true;
- }
- }
- controls[0]->has_volatiles = has_volatiles;
-}
-EXPORT_SYMBOL(v4l2_ctrl_cluster);
-
-void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
- u8 manual_val, bool set_volatile)
-{
- struct v4l2_ctrl *master = controls[0];
- u32 flag = 0;
- int i;
-
- v4l2_ctrl_cluster(ncontrols, controls);
- WARN_ON(ncontrols <= 1);
- WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
- WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
- master->is_auto = true;
- master->has_volatiles = set_volatile;
- master->manual_mode_value = manual_val;
- master->flags |= V4L2_CTRL_FLAG_UPDATE;
-
- if (!is_cur_manual(master))
- flag = V4L2_CTRL_FLAG_INACTIVE |
- (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
-
- for (i = 1; i < ncontrols; i++)
- if (controls[i])
- controls[i]->flags |= flag;
-}
-EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
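A hedged example of the auto-cluster helper: autogain as the master control, with gain volatile while in auto mode, and manual_val == 0 meaning "autogain off". The handler and ops names are illustrative.

	struct v4l2_ctrl *ctrls[2];

	/* my_ctrl_ops must implement g_volatile_ctrl since gain is volatile in auto mode. */
	ctrls[0] = v4l2_ctrl_new_std(&hdl, &my_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	ctrls[1] = v4l2_ctrl_new_std(&hdl, &my_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, 32);
	v4l2_ctrl_auto_cluster(2, ctrls, 0, true);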
-
-/* Activate/deactivate a control. */
-void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
-{
- /* invert since the actual flag is called 'inactive' */
- bool inactive = !active;
- bool old;
-
- if (ctrl == NULL)
- return;
-
- if (inactive)
- /* set V4L2_CTRL_FLAG_INACTIVE */
- old = test_and_set_bit(4, &ctrl->flags);
- else
- /* clear V4L2_CTRL_FLAG_INACTIVE */
- old = test_and_clear_bit(4, &ctrl->flags);
- if (old != inactive)
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
-}
-EXPORT_SYMBOL(v4l2_ctrl_activate);
-
-/* Grab/ungrab a control.
- Typically used when streaming starts and you want to grab controls,
- preventing the user from changing them.
-
- Just call this and the framework will block any attempts to change
- these controls. */
-void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
-{
- bool old;
-
- if (ctrl == NULL)
- return;
-
- v4l2_ctrl_lock(ctrl);
- if (grabbed)
- /* set V4L2_CTRL_FLAG_GRABBED */
- old = test_and_set_bit(1, &ctrl->flags);
- else
- /* clear V4L2_CTRL_FLAG_GRABBED */
- old = test_and_clear_bit(1, &ctrl->flags);
- if (old != grabbed)
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
- v4l2_ctrl_unlock(ctrl);
-}
-EXPORT_SYMBOL(v4l2_ctrl_grab);
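In practice the grab helper above is wired to stream start/stop; a one-line sketch with a hypothetical 'priv->exposure' control pointer and an s_stream-style 'enable' flag:

	/* Called from s_stream(): lock exposure while streaming, release it after. */
	v4l2_ctrl_grab(priv->exposure, enable);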
-
-/* Log the control name and value */
-static void log_ctrl(const struct v4l2_ctrl *ctrl,
- const char *prefix, const char *colon)
-{
- if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
- return;
- if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- return;
-
- printk(KERN_INFO "%s%s%s: ", prefix, colon, ctrl->name);
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- printk(KERN_CONT "%d", ctrl->cur.val);
- break;
- case V4L2_CTRL_TYPE_BOOLEAN:
- printk(KERN_CONT "%s", ctrl->cur.val ? "true" : "false");
- break;
- case V4L2_CTRL_TYPE_MENU:
- printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
- break;
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- printk(KERN_CONT "%lld", ctrl->qmenu_int[ctrl->cur.val]);
- break;
- case V4L2_CTRL_TYPE_BITMASK:
- printk(KERN_CONT "0x%08x", ctrl->cur.val);
- break;
- case V4L2_CTRL_TYPE_INTEGER64:
- printk(KERN_CONT "%lld", ctrl->cur.val64);
- break;
- case V4L2_CTRL_TYPE_STRING:
- printk(KERN_CONT "%s", ctrl->cur.string);
- break;
- default:
- printk(KERN_CONT "unknown type %d", ctrl->type);
- break;
- }
- if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
- V4L2_CTRL_FLAG_GRABBED |
- V4L2_CTRL_FLAG_VOLATILE)) {
- if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
- printk(KERN_CONT " inactive");
- if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
- printk(KERN_CONT " grabbed");
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
- printk(KERN_CONT " volatile");
- }
- printk(KERN_CONT "\n");
-}
-
-/* Log all controls owned by the handler */
-void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
- const char *prefix)
-{
- struct v4l2_ctrl *ctrl;
- const char *colon = "";
- int len;
-
- if (hdl == NULL)
- return;
- if (prefix == NULL)
- prefix = "";
- len = strlen(prefix);
- if (len && prefix[len - 1] != ' ')
- colon = ": ";
- mutex_lock(hdl->lock);
- list_for_each_entry(ctrl, &hdl->ctrls, node)
- if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
- log_ctrl(ctrl, prefix, colon);
- mutex_unlock(hdl->lock);
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
-
-int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
-{
- v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
-
-/* Call s_ctrl for all controls owned by the handler */
-int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
-{
- struct v4l2_ctrl *ctrl;
- int ret = 0;
-
- if (hdl == NULL)
- return 0;
- mutex_lock(hdl->lock);
- list_for_each_entry(ctrl, &hdl->ctrls, node)
- ctrl->done = false;
-
- list_for_each_entry(ctrl, &hdl->ctrls, node) {
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int i;
-
- /* Skip if this control was already handled by a cluster. */
- /* Skip button controls and read-only controls. */
- if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
- continue;
-
- for (i = 0; i < master->ncontrols; i++) {
- if (master->cluster[i]) {
- cur_to_new(master->cluster[i]);
- master->cluster[i]->is_new = 1;
- master->cluster[i]->done = true;
- }
- }
- ret = call_op(master, s_ctrl);
- if (ret)
- break;
- }
- mutex_unlock(hdl->lock);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
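Drivers typically call the setup helper above once the hardware is powered up, so that the current value of every control is written out; a sketch in which to_my_sensor() and struct my_sensor are made-up container_of-style helpers:

	static int my_sensor_s_power(struct v4l2_subdev *sd, int on)
	{
		struct my_sensor *sensor = to_my_sensor(sd);	/* hypothetical */

		if (!on)
			return 0;
		/* Push the current value of every control to the hardware. */
		return v4l2_ctrl_handler_setup(&sensor->hdl);
	}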
-
-/* Implement VIDIOC_QUERYCTRL */
-int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
-{
- u32 id = qc->id & V4L2_CTRL_ID_MASK;
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl *ctrl;
-
- if (hdl == NULL)
- return -EINVAL;
-
- mutex_lock(hdl->lock);
-
- /* Try to find it */
- ref = find_ref(hdl, id);
-
- if ((qc->id & V4L2_CTRL_FLAG_NEXT_CTRL) && !list_empty(&hdl->ctrl_refs)) {
- /* Find the next control with ID > qc->id */
-
- /* Did we reach the end of the control list? */
- if (id >= node2id(hdl->ctrl_refs.prev)) {
- ref = NULL; /* Yes, so there is no next control */
- } else if (ref) {
- /* We found a control with the given ID, so just get
- the next one in the list. */
- ref = list_entry(ref->node.next, typeof(*ref), node);
- } else {
- /* No control with the given ID exists, so start
- searching for the next largest ID. We know there
- is one, otherwise the first 'if' above would have
- been true. */
- list_for_each_entry(ref, &hdl->ctrl_refs, node)
- if (id < ref->ctrl->id)
- break;
- }
- }
- mutex_unlock(hdl->lock);
- if (!ref)
- return -EINVAL;
-
- ctrl = ref->ctrl;
- memset(qc, 0, sizeof(*qc));
- if (id >= V4L2_CID_PRIVATE_BASE)
- qc->id = id;
- else
- qc->id = ctrl->id;
- strlcpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->minimum = ctrl->minimum;
- qc->maximum = ctrl->maximum;
- qc->default_value = ctrl->default_value;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU
- || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
- qc->step = 1;
- else
- qc->step = ctrl->step;
- qc->flags = ctrl->flags;
- qc->type = ctrl->type;
- return 0;
-}
-EXPORT_SYMBOL(v4l2_queryctrl);
-
-int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
- return -EINVAL;
- return v4l2_queryctrl(sd->ctrl_handler, qc);
-}
-EXPORT_SYMBOL(v4l2_subdev_queryctrl);
-
-/* Implement VIDIOC_QUERYMENU */
-int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
-{
- struct v4l2_ctrl *ctrl;
- u32 i = qm->index;
-
- ctrl = v4l2_ctrl_find(hdl, qm->id);
- if (!ctrl)
- return -EINVAL;
-
- qm->reserved = 0;
- /* Sanity checks */
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_MENU:
- if (ctrl->qmenu == NULL)
- return -EINVAL;
- break;
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- if (ctrl->qmenu_int == NULL)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (i < ctrl->minimum || i > ctrl->maximum)
- return -EINVAL;
-
- /* Use mask to see if this menu item should be skipped */
- if (ctrl->menu_skip_mask & (1 << i))
- return -EINVAL;
- /* Empty menu items should also be skipped */
- if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
- if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
- return -EINVAL;
- strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
- } else {
- qm->value = ctrl->qmenu_int[i];
- }
- return 0;
-}
-EXPORT_SYMBOL(v4l2_querymenu);
-
-int v4l2_subdev_querymenu(struct v4l2_subdev *sd, struct v4l2_querymenu *qm)
-{
- return v4l2_querymenu(sd->ctrl_handler, qm);
-}
-EXPORT_SYMBOL(v4l2_subdev_querymenu);
-
-
-
-/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
-
- It is not a fully atomic operation, just best-effort only. After all, if
- multiple controls have to be set through multiple i2c writes (for example)
- then some initial writes may succeed while others fail, leaving the
- system in an inconsistent state. The question is how much effort you are
- willing to spend on trying to make something atomic that really isn't.
-
- From the point of view of an application the main requirement is that
- when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
- error should be returned without actually affecting any controls.
-
- If all the values are correct, then it is acceptable to just give up
- in case of low-level errors.
-
- It is important though that the application can tell when only a partial
- configuration was done. The way we do that is through the error_idx field
- of struct v4l2_ext_controls: if that is equal to the count field then no
- controls were affected. Otherwise all controls before that index were
- successful in performing their 'get' or 'set' operation, the control at
- the given index failed, and you don't know what happened with the controls
- after the failed one: if they were part of a control cluster they
- could have been successfully processed (if a cluster member was encountered
- at index < error_idx), they could have failed (if a cluster member was at
- error_idx), or they may not have been processed yet (if the first cluster
- member appeared after error_idx).
-
- It is all fairly theoretical, though. In practice all you can do is to
- bail out. If error_idx == count, then it is an application bug. If
- error_idx < count then it is only an application bug if the error code was
- EBUSY. That usually means that something started streaming just when you
- tried to set the controls. In all other cases it is a driver/hardware
- problem and all you can do is to retry or bail out.
-
- Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
- never modifies controls the error_idx is just set to whatever control
- has an invalid value.
- */
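To make the error_idx convention described above concrete, here is a minimal userspace sketch, assuming an already-open V4L2 file descriptor and with error handling trimmed; the function name and return values are invented for illustration:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int set_picture_controls(int fd)
	{
		struct v4l2_ext_control xc[2] = {
			{ .id = V4L2_CID_BRIGHTNESS, .value = 128 },
			{ .id = V4L2_CID_CONTRAST,   .value = 32 },
		};
		struct v4l2_ext_controls xcs = {
			.ctrl_class = V4L2_CTRL_CLASS_USER,
			.count = 2,
			.controls = xc,
		};

		if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &xcs) == 0)
			return 0;
		if (xcs.error_idx == xcs.count)
			return -1;	/* validation failed: no control was touched */
		return -1;		/* partial: controls before error_idx were set */
	}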
-
-/* Prepare for the extended g/s/try functions.
- Find the controls in the control array and do some basic checks. */
-static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct v4l2_ctrl_helper *helpers)
-{
- struct v4l2_ctrl_helper *h;
- bool have_clusters = false;
- u32 i;
-
- for (i = 0, h = helpers; i < cs->count; i++, h++) {
- struct v4l2_ext_control *c = &cs->controls[i];
- struct v4l2_ctrl_ref *ref;
- struct v4l2_ctrl *ctrl;
- u32 id = c->id & V4L2_CTRL_ID_MASK;
-
- cs->error_idx = i;
-
- if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
- return -EINVAL;
-
- /* Old-style private controls are not allowed for
- extended controls */
- if (id >= V4L2_CID_PRIVATE_BASE)
- return -EINVAL;
- ref = find_ref_lock(hdl, id);
- if (ref == NULL)
- return -EINVAL;
- ctrl = ref->ctrl;
- if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
- return -EINVAL;
-
- if (ctrl->cluster[0]->ncontrols > 1)
- have_clusters = true;
- if (ctrl->cluster[0] != ctrl)
- ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
- /* Store the ref to the master control of the cluster */
- h->mref = ref;
- h->ctrl = ctrl;
- /* Initially set next to 0, meaning that there is no other
- control in this helper array belonging to the same
- cluster */
- h->next = 0;
- }
-
- /* We are done if there were no controls that belong to a multi-
- control cluster. */
- if (!have_clusters)
- return 0;
-
- /* The code below figures out in O(n) time which controls in the list
- belong to the same cluster. */
-
- /* This has to be done with the handler lock taken. */
- mutex_lock(hdl->lock);
-
- /* First zero the helper field in the master control references */
- for (i = 0; i < cs->count; i++)
- helpers[i].mref->helper = NULL;
- for (i = 0, h = helpers; i < cs->count; i++, h++) {
- struct v4l2_ctrl_ref *mref = h->mref;
-
- /* If the mref->helper is set, then it points to an earlier
- helper that belongs to the same cluster. */
- if (mref->helper) {
- /* Set the next field of mref->helper to the current
- index, so that the earlier helper now points to
- the next helper in the same cluster. */
- mref->helper->next = i;
- /* mref should be set only for the first helper in the
- cluster, clear the others. */
- h->mref = NULL;
- }
- /* Point the mref helper to the current helper struct. */
- mref->helper = h;
- }
- mutex_unlock(hdl->lock);
- return 0;
-}
-
-/* Handles the corner case where cs->count == 0. It checks whether the
- specified control class exists. If that class ID is 0, then it checks
- whether there are any controls at all. */
-static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
-{
- if (ctrl_class == 0)
- return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
- return find_ref_lock(hdl, ctrl_class | 1) ? 0 : -EINVAL;
-}
-
-
-
-/* Get extended controls. Allocates the helpers array if needed. */
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
-{
- struct v4l2_ctrl_helper helper[4];
- struct v4l2_ctrl_helper *helpers = helper;
- int ret;
- int i, j;
-
- cs->error_idx = cs->count;
- cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
-
- if (hdl == NULL)
- return -EINVAL;
-
- if (cs->count == 0)
- return class_check(hdl, cs->ctrl_class);
-
- if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
- if (helpers == NULL)
- return -ENOMEM;
- }
-
- ret = prepare_ext_ctrls(hdl, cs, helpers);
- cs->error_idx = cs->count;
-
- for (i = 0; !ret && i < cs->count; i++)
- if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
- ret = -EACCES;
-
- for (i = 0; !ret && i < cs->count; i++) {
- int (*ctrl_to_user)(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl) = cur_to_user;
- struct v4l2_ctrl *master;
-
- if (helpers[i].mref == NULL)
- continue;
-
- master = helpers[i].mref->ctrl;
- cs->error_idx = i;
-
- v4l2_ctrl_lock(master);
-
- /* g_volatile_ctrl will update the new control values */
- if ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
- (master->has_volatiles && !is_cur_manual(master))) {
- for (j = 0; j < master->ncontrols; j++)
- cur_to_new(master->cluster[j]);
- ret = call_op(master, g_volatile_ctrl);
- ctrl_to_user = new_to_user;
- }
- /* If OK, then copy the current (for non-volatile controls)
- or the new (for volatile controls) control values to the
- caller */
- if (!ret) {
- u32 idx = i;
-
- do {
- ret = ctrl_to_user(cs->controls + idx,
- helpers[idx].ctrl);
- idx = helpers[idx].next;
- } while (!ret && idx);
- }
- v4l2_ctrl_unlock(master);
- }
-
- if (cs->count > ARRAY_SIZE(helper))
- kfree(helpers);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_g_ext_ctrls);
-
-int v4l2_subdev_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return v4l2_g_ext_ctrls(sd->ctrl_handler, cs);
-}
-EXPORT_SYMBOL(v4l2_subdev_g_ext_ctrls);
-
-/* Helper function to get a single control */
-static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
-{
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret = 0;
- int i;
-
- /* String controls are not supported. The new_to_user() and
- * cur_to_user() calls below would need to be modified not to access
- * userspace memory when called from get_ctrl().
- */
- if (ctrl->type == V4L2_CTRL_TYPE_STRING)
- return -EINVAL;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
- return -EACCES;
-
- v4l2_ctrl_lock(master);
- /* g_volatile_ctrl will update the current control values */
- if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
- ret = call_op(master, g_volatile_ctrl);
- new_to_user(c, ctrl);
- } else {
- cur_to_user(c, ctrl);
- }
- v4l2_ctrl_unlock(master);
- return ret;
-}
-
-int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
- struct v4l2_ext_control c;
- int ret;
-
- if (ctrl == NULL || !type_is_int(ctrl))
- return -EINVAL;
- ret = get_ctrl(ctrl, &c);
- control->value = c.value;
- return ret;
-}
-EXPORT_SYMBOL(v4l2_g_ctrl);
-
-int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
-{
- return v4l2_g_ctrl(sd->ctrl_handler, control);
-}
-EXPORT_SYMBOL(v4l2_subdev_g_ctrl);
-
-s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- WARN_ON(!type_is_int(ctrl));
- c.value = 0;
- get_ctrl(ctrl, &c);
- return c.value;
-}
-EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
-
-s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- WARN_ON(ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
- c.value64 = 0;
- get_ctrl(ctrl, &c);
- return c.value64;
-}
-EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
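A common in-kernel use of the 64-bit getter above is a bridge driver reading a sensor's pixel rate; 'sensor_sd' is a placeholder subdev pointer:

	struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sensor_sd->ctrl_handler,
						V4L2_CID_PIXEL_RATE);
	s64 pixel_rate = ctrl ? v4l2_ctrl_g_ctrl_int64(ctrl) : 0;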
-
-
-/* Core function that calls try/s_ctrl and ensures that the new value is
- copied to the current value on a set.
- Must be called with ctrl->handler->lock held. */
-static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
- bool set, u32 ch_flags)
-{
- bool update_flag;
- int ret;
- int i;
-
- /* Go through the cluster and either validate the new value or
- (if no new value was set), copy the current value to the new
- value, ensuring a consistent view for the control ops when
- called. */
- for (i = 0; i < master->ncontrols; i++) {
- struct v4l2_ctrl *ctrl = master->cluster[i];
-
- if (ctrl == NULL)
- continue;
-
- if (!ctrl->is_new) {
- cur_to_new(ctrl);
- continue;
- }
- /* Check again: it may have changed since the
- previous check in try_or_set_ext_ctrls(). */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
- return -EBUSY;
- }
-
- ret = call_op(master, try_ctrl);
-
- /* Don't set if there is no change */
- if (ret || !set || !cluster_changed(master))
- return ret;
- ret = call_op(master, s_ctrl);
- if (ret)
- return ret;
-
- /* If OK, then make the new values permanent. */
- update_flag = is_cur_manual(master) != is_new_manual(master);
- for (i = 0; i < master->ncontrols; i++)
- new_to_cur(fh, master->cluster[i], ch_flags |
- ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
- return 0;
-}
-
-/* Validate controls. */
-static int validate_ctrls(struct v4l2_ext_controls *cs,
- struct v4l2_ctrl_helper *helpers, bool set)
-{
- unsigned i;
- int ret = 0;
-
- cs->error_idx = cs->count;
- for (i = 0; i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
-
- cs->error_idx = i;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
- return -EACCES;
- /* This test is also done in try_or_set_cluster() which
- is called with the handler lock held, so that has the final say,
- but it makes sense to do an up-front check as well. Once
- an error occurs in try_or_set_cluster() some other
- controls may have been set already and we want to make a
- best effort to avoid that. */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
- return -EBUSY;
- ret = validate_new(ctrl, &cs->controls[i]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/* Obtain the current volatile values of an autocluster and mark them
- as new. */
-static void update_from_auto_cluster(struct v4l2_ctrl *master)
-{
- int i;
-
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
- if (!call_op(master, g_volatile_ctrl))
- for (i = 1; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->is_new = 1;
-}
-
-/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- bool set)
-{
- struct v4l2_ctrl_helper helper[4];
- struct v4l2_ctrl_helper *helpers = helper;
- unsigned i, j;
- int ret;
-
- cs->error_idx = cs->count;
- cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
-
- if (hdl == NULL)
- return -EINVAL;
-
- if (cs->count == 0)
- return class_check(hdl, cs->ctrl_class);
-
- if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
- if (!helpers)
- return -ENOMEM;
- }
- ret = prepare_ext_ctrls(hdl, cs, helpers);
- if (!ret)
- ret = validate_ctrls(cs, helpers, set);
- if (ret && set)
- cs->error_idx = cs->count;
- for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *master;
- u32 idx = i;
-
- if (helpers[i].mref == NULL)
- continue;
-
- cs->error_idx = i;
- master = helpers[i].mref->ctrl;
- v4l2_ctrl_lock(master);
-
- /* Reset the 'is_new' flags of the cluster */
- for (j = 0; j < master->ncontrols; j++)
- if (master->cluster[j])
- master->cluster[j]->is_new = 0;
-
- /* For volatile autoclusters that are currently in auto mode
- we need to discover whether the cluster will be switched to manual mode.
- If so, then we have to copy the current volatile values
- first since those will become the new manual values (which
- may be overwritten by explicit new values from this set
- of controls). */
- if (master->is_auto && master->has_volatiles &&
- !is_cur_manual(master)) {
- /* Pick an initial non-manual value */
- s32 new_auto_val = master->manual_mode_value + 1;
- u32 tmp_idx = idx;
-
- do {
- /* Check if the auto control is part of the
- list, and remember the new value. */
- if (helpers[tmp_idx].ctrl == master)
- new_auto_val = cs->controls[tmp_idx].value;
- tmp_idx = helpers[tmp_idx].next;
- } while (tmp_idx);
- /* If the new value == the manual value, then copy
- the current volatile values. */
- if (new_auto_val == master->manual_mode_value)
- update_from_auto_cluster(master);
- }
-
- /* Copy the new caller-supplied control values.
- user_to_new() sets 'is_new' to 1. */
- do {
- ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
- idx = helpers[idx].next;
- } while (!ret && idx);
-
- if (!ret)
- ret = try_or_set_cluster(fh, master, set, 0);
-
- /* Copy the new values back to userspace. */
- if (!ret) {
- idx = i;
- do {
- ret = new_to_user(cs->controls + idx,
- helpers[idx].ctrl);
- idx = helpers[idx].next;
- } while (!ret && idx);
- }
- v4l2_ctrl_unlock(master);
- }
-
- if (cs->count > ARRAY_SIZE(helper))
- kfree(helpers);
- return ret;
-}
-
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, hdl, cs, false);
-}
-EXPORT_SYMBOL(v4l2_try_ext_ctrls);
-
-int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(fh, hdl, cs, true);
-}
-EXPORT_SYMBOL(v4l2_s_ext_ctrls);
-
-int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
-}
-EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
-
-int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
-}
-EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
-
-/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c, u32 ch_flags)
-{
- struct v4l2_ctrl *master = ctrl->cluster[0];
- int i;
-
- /* String controls are not supported. The user_to_new() and
- * cur_to_user() calls below would need to be modified not to access
- * userspace memory when called from set_ctrl().
- */
- if (ctrl->type == V4L2_CTRL_TYPE_STRING)
- return -EINVAL;
-
- /* Reset the 'is_new' flags of the cluster */
- for (i = 0; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->is_new = 0;
-
- /* For autoclusters with volatiles that are switched from auto to
- manual mode we have to update the current volatile values since
- those will become the initial manual values after such a switch. */
- if (master->is_auto && master->has_volatiles && ctrl == master &&
- !is_cur_manual(master) && c->value == master->manual_mode_value)
- update_from_auto_cluster(master);
-
- user_to_new(c, ctrl);
- return try_or_set_cluster(fh, master, true, ch_flags);
-}
-
-/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
-{
- int ret = validate_new(ctrl, c);
-
- if (!ret) {
- v4l2_ctrl_lock(ctrl);
- ret = set_ctrl(fh, ctrl, c, 0);
- if (!ret)
- cur_to_user(c, ctrl);
- v4l2_ctrl_unlock(ctrl);
- }
- return ret;
-}
-
-int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_control *control)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
- struct v4l2_ext_control c;
- int ret;
-
- if (ctrl == NULL || !type_is_int(ctrl))
- return -EINVAL;
-
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
- return -EACCES;
-
- c.value = control->value;
- ret = set_ctrl_lock(fh, ctrl, &c);
- control->value = c.value;
- return ret;
-}
-EXPORT_SYMBOL(v4l2_s_ctrl);
-
-int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
-{
- return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
-}
-EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
-
-int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- WARN_ON(!type_is_int(ctrl));
- c.value = val;
- return set_ctrl_lock(NULL, ctrl, &c);
-}
-EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
-
-int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
-{
- struct v4l2_ext_control c;
-
- /* It's a driver bug if this happens. */
- WARN_ON(ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
- c.value64 = val;
- return set_ctrl_lock(NULL, ctrl, &c);
-}
-EXPORT_SYMBOL(v4l2_ctrl_s_ctrl_int64);
-
-void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
-{
- if (ctrl == NULL)
- return;
- if (notify == NULL) {
- ctrl->call_notify = 0;
- return;
- }
- if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
- return;
- ctrl->handler->notify = notify;
- ctrl->handler->notify_priv = priv;
- ctrl->call_notify = 1;
-}
-EXPORT_SYMBOL(v4l2_ctrl_notify);
-
-int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
- s32 min, s32 max, u32 step, s32 def)
-{
- int ret = check_range(ctrl->type, min, max, step, def);
- struct v4l2_ext_control c;
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- case V4L2_CTRL_TYPE_BITMASK:
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
- }
- v4l2_ctrl_lock(ctrl);
- ctrl->minimum = min;
- ctrl->maximum = max;
- ctrl->step = step;
- ctrl->default_value = def;
- c.value = ctrl->cur.val;
- if (validate_new(ctrl, &c))
- c.value = def;
- if (c.value != ctrl->cur.val)
- ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
- else
- send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
- v4l2_ctrl_unlock(ctrl);
- return ret;
-}
-EXPORT_SYMBOL(v4l2_ctrl_modify_range);
-
-static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
-
- if (ctrl == NULL)
- return -EINVAL;
-
- v4l2_ctrl_lock(ctrl);
- list_add_tail(&sev->node, &ctrl->ev_subs);
- if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
- (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
- struct v4l2_event ev;
- u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
-
- if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
- changes |= V4L2_EVENT_CTRL_CH_VALUE;
- fill_event(&ev, ctrl, changes);
- /* Mark the queue as active, allowing this initial
- event to be accepted. */
- sev->elems = elems;
- v4l2_event_queue_fh(sev->fh, &ev);
- }
- v4l2_ctrl_unlock(ctrl);
- return 0;
-}
-
-static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
-{
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
-
- v4l2_ctrl_lock(ctrl);
- list_del(&sev->node);
- v4l2_ctrl_unlock(ctrl);
-}
-
-void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
-{
- u32 old_changes = old->u.ctrl.changes;
-
- old->u.ctrl = new->u.ctrl;
- old->u.ctrl.changes |= old_changes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_replace);
-
-void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
-{
- new->u.ctrl.changes |= old->u.ctrl.changes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_merge);
-
-const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
- .add = v4l2_ctrl_add_event,
- .del = v4l2_ctrl_del_event,
- .replace = v4l2_ctrl_replace,
- .merge = v4l2_ctrl_merge,
-};
-EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
-
-int v4l2_ctrl_log_status(struct file *file, void *fh)
-{
- struct video_device *vfd = video_devdata(file);
- struct v4l2_fh *vfh = file->private_data;
-
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
- v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
- vfd->v4l2_dev->name);
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_log_status);
-
-int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
-{
- if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
- return -EINVAL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
-
-int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- if (!sd->ctrl_handler)
- return -EINVAL;
- return v4l2_ctrl_subscribe_event(fh, sub);
-}
-EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct v4l2_fh *fh = file->private_data;
-
- if (v4l2_event_pending(fh))
- return POLLPRI;
- poll_wait(file, &fh->wait, wait);
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_poll);
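The exported control helpers removed here (v4l2_ctrl_s_ctrl(), v4l2_ctrl_s_ctrl_int64(), v4l2_ctrl_modify_range(), v4l2_ctrl_notify()) remain part of the driver-facing control framework API. A minimal usage sketch, built only from the signatures visible above, follows; the gain control, its range and the calling context are illustrative assumptions rather than anything taken from this patch.

/* Sketch only: a driver updating a gain control it owns. */
#include <media/v4l2-ctrls.h>

static int example_update_gain(struct v4l2_ctrl *gain, s32 new_gain)
{
	int ret;

	/*
	 * Re-advertise the valid range first; v4l2_ctrl_modify_range()
	 * resets an out-of-range current value to the new default and
	 * sends V4L2_EVENT_CTRL_CH_RANGE to subscribers.
	 */
	ret = v4l2_ctrl_modify_range(gain, 0, 255, 1, 16);
	if (ret)
		return ret;

	/* Apply the new value under the control handler's lock. */
	return v4l2_ctrl_s_ctrl(gain, new_gain);
}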
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c8859d6ff6ad..10a126e50c1c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -1,21 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video capture interface for Linux version 2
*
* A generic video device interface for the LINUX operating system
* using a set of device structures/vectors for low level operations.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
*
* Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
* - Added procfs support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -25,37 +24,44 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#define VIDEO_NUM_DEVICES 256
#define VIDEO_NAME "video4linux"
+#define dprintk(fmt, arg...) do { \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
/*
* sysfs stuff
*/
-static ssize_t show_index(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t index_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->index);
}
+static DEVICE_ATTR_RO(index);
-static ssize_t show_debug(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t dev_debug_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
- return sprintf(buf, "%i\n", vdev->debug);
+ return sprintf(buf, "%i\n", vdev->dev_debug);
}
-static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct video_device *vdev = to_video_device(cd);
int res = 0;
@@ -65,29 +71,34 @@ static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
if (res)
return res;
- vdev->debug = value;
+ vdev->dev_debug = value;
return len;
}
+static DEVICE_ATTR_RW(dev_debug);
-static ssize_t show_name(struct device *cd,
+static ssize_t name_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
+static DEVICE_ATTR_RO(name);
-static struct device_attribute video_device_attrs[] = {
- __ATTR(name, S_IRUGO, show_name, NULL),
- __ATTR(debug, 0644, show_debug, set_debug),
- __ATTR(index, S_IRUGO, show_index, NULL),
- __ATTR_NULL
+static struct attribute *video_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_dev_debug.attr,
+ &dev_attr_index.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(video_device);
+
+static struct dentry *v4l2_debugfs_root_dir;
/*
* Active devices
*/
-static struct video_device *video_device[VIDEO_NUM_DEVICES];
+static struct video_device *video_devices[VIDEO_NUM_DEVICES];
static DEFINE_MUTEX(videodev_lock);
static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
@@ -98,7 +109,7 @@ static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
/* Return the bitmap corresponding to vfl_type. */
-static inline unsigned long *devnode_bits(int vfl_type)
+static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
/* Any types not assigned to fixed minor ranges must be mapped to
one single bitmap for the purposes of finding a free node number
@@ -109,7 +120,7 @@ static inline unsigned long *devnode_bits(int vfl_type)
}
#else
/* Return the bitmap corresponding to vfl_type. */
-static inline unsigned long *devnode_bits(int vfl_type)
+static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
return devnode_nums[vfl_type];
}
@@ -169,14 +180,14 @@ static void v4l2_device_release(struct device *cd)
struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
mutex_lock(&videodev_lock);
- if (WARN_ON(video_device[vdev->minor] != vdev)) {
+ if (WARN_ON(video_devices[vdev->minor] != vdev)) {
/* should not happen */
mutex_unlock(&videodev_lock);
return;
}
/* Free up this device for reuse */
- video_device[vdev->minor] = NULL;
+ video_devices[vdev->minor] = NULL;
/* Delete the cdev on this minor as well */
cdev_del(vdev->cdev);
@@ -190,9 +201,12 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (v4l2_dev && v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_device_unregister_entity(&vdev->entity);
+ if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
+ /* Remove interfaces and interface links */
+ media_devnode_remove(vdev->intf_devnode);
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
+ media_device_unregister_entity(&vdev->entity);
+ }
#endif
/* Do not call v4l2_device_put if there is no release callback set.
@@ -203,7 +217,7 @@ static void v4l2_device_release(struct device *cd)
* TODO: In the long run all drivers that use v4l2_device should use the
* v4l2_device release callback. This check will then be unnecessary.
*/
- if (v4l2_dev && v4l2_dev->release == NULL)
+ if (v4l2_dev->release == NULL)
v4l2_dev = NULL;
/* Release video_device and perform other
@@ -215,14 +229,14 @@ static void v4l2_device_release(struct device *cd)
v4l2_device_put(v4l2_dev);
}
-static struct class video_class = {
+static const struct class video_class = {
.name = VIDEO_NAME,
- .dev_attrs = video_device_attrs,
+ .dev_groups = video_device_groups,
};
struct video_device *video_devdata(struct file *file)
{
- return video_device[iminor(file_inode(file))];
+ return video_devices[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);
@@ -300,8 +314,9 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->read(filp, buf, sz, off);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: read: %zd (%d)\n",
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ dprintk("%s: read: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
}
@@ -316,24 +331,28 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return -EINVAL;
if (video_is_registered(vdev))
ret = vdev->fops->write(filp, buf, sz, off);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: write: %zd (%d)\n",
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ dprintk("%s: write: %zd (%d)\n",
video_device_node_name(vdev), sz, ret);
return ret;
}
-static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- unsigned int res = POLLERR | POLLHUP;
+ __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI;
- if (!vdev->fops->poll)
- return DEFAULT_POLLMASK;
- if (video_is_registered(vdev))
- res = vdev->fops->poll(filp, poll);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: poll: %08x\n",
- video_device_node_name(vdev), res);
+ if (video_is_registered(vdev)) {
+ if (!vdev->fops->poll)
+ res = DEFAULT_POLLMASK;
+ else
+ res = vdev->fops->poll(filp, poll);
+ }
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
+ dprintk("%s: poll: %08x %08x\n",
+ video_device_node_name(vdev), res,
+ poll_requested_events(poll));
return res;
}
@@ -343,47 +362,8 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret = -ENODEV;
if (vdev->fops->unlocked_ioctl) {
- struct mutex *lock = v4l2_ioctl_get_lock(vdev, cmd);
-
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
- if (lock)
- mutex_unlock(lock);
- } else if (vdev->fops->ioctl) {
- /* This code path is a replacement for the BKL. It is a major
- * hack but it will have to do for those drivers that are not
- * yet converted to use unlocked_ioctl.
- *
- * There are two options: if the driver implements struct
- * v4l2_device, then the lock defined there is used to
- * serialize the ioctls. Otherwise the v4l2 core lock defined
- * below is used. This lock is really bad since it serializes
- * completely independent devices.
- *
- * Both variants suffer from the same problem: if the driver
- * sleeps, then it blocks all ioctls since the lock is still
- * held. This is very common for VIDIOC_DQBUF since that
- * normally waits for a frame to arrive. As a result any other
- * ioctl calls will proceed very, very slowly since each call
- * will have to wait for the VIDIOC_QBUF to finish. Things that
- * should take 0.01s may now take 10-20 seconds.
- *
- * The workaround is to *not* take the lock for VIDIOC_DQBUF.
- * This actually works OK for videobuf-based drivers, since
- * videobuf will take its own internal lock.
- */
- static DEFINE_MUTEX(v4l2_ioctl_mutex);
- struct mutex *m = vdev->v4l2_dev ?
- &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex;
-
- if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
- return -ERESTARTSYS;
- if (video_is_registered(vdev))
- ret = vdev->fops->ioctl(filp, cmd, arg);
- if (cmd != VIDIOC_DQBUF)
- mutex_unlock(m);
} else
ret = -ENOTTY;
@@ -405,8 +385,8 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp,
if (!video_is_registered(vdev))
return -ENODEV;
ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: get_unmapped_area (%d)\n",
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: get_unmapped_area (%d)\n",
video_device_node_name(vdev), ret);
return ret;
}
@@ -421,8 +401,8 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
return -ENODEV;
if (video_is_registered(vdev))
ret = vdev->fops->mmap(filp, vm);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: mmap (%d)\n",
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: mmap (%d)\n",
video_device_node_name(vdev), ret);
return ret;
}
@@ -431,7 +411,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
- int ret = 0;
+ int ret;
/* Check if the video device is available */
mutex_lock(&videodev_lock);
@@ -444,16 +424,27 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
- if (vdev->fops->open) {
- if (video_is_registered(vdev))
- ret = vdev->fops->open(filp);
- else
- ret = -ENODEV;
+
+ if (!video_is_registered(vdev)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ ret = vdev->fops->open(filp);
+ if (ret)
+ goto done;
+
+ /* All drivers must use v4l2_fh. */
+ if (WARN_ON(!test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))) {
+ vdev->fops->release(filp);
+ ret = -ENODEV;
}
- if (vdev->debug)
- printk(KERN_DEBUG "%s: open (%d)\n",
+done:
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: open (%d)\n",
video_device_node_name(vdev), ret);
+
/* decrease the refcount in case of an error */
if (ret)
video_put(vdev);
@@ -464,12 +455,24 @@ static int v4l2_open(struct inode *inode, struct file *filp)
static int v4l2_release(struct inode *inode, struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
- int ret = 0;
+ int ret;
- if (vdev->fops->release)
+ /*
+ * We need to serialize the release() with queueing new requests.
+ * The release() may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
ret = vdev->fops->release(filp);
- if (vdev->debug)
- printk(KERN_DEBUG "%s: release\n",
+ }
+
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: release\n",
video_device_node_name(vdev));
/* decrease the refcount unconditionally since the release()
@@ -491,7 +494,6 @@ static const struct file_operations v4l2_fops = {
#endif
.release = v4l2_release,
.poll = v4l2_poll,
- .llseek = no_llseek,
};
/**
@@ -517,18 +519,17 @@ static int get_index(struct video_device *vdev)
bitmap_zero(used, VIDEO_NUM_DEVICES);
for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
- if (video_device[i] != NULL &&
- video_device[i]->v4l2_dev == vdev->v4l2_dev) {
- set_bit(video_device[i]->index, used);
+ if (video_devices[i] != NULL &&
+ video_devices[i]->v4l2_dev == vdev->v4l2_dev) {
+ __set_bit(video_devices[i]->index, used);
}
}
return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
}
-#define SET_VALID_IOCTL(ops, cmd, op) \
- if (ops->op) \
- set_bit(_IOC_NR(cmd), valid_ioctls)
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
/* This determines which ioctls are actually implemented in the driver.
It's a one-time thing which simplifies video_ioctl2 as it can just do
@@ -545,89 +546,101 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
+ bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
+ bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
+ bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING;
+ bool is_edid = vdev->device_caps & V4L2_CAP_EDID;
bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
/* vfl_type and vfl_dir independent ioctls */
SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
- if (ops->vidioc_g_priority ||
- test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
- set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
- if (ops->vidioc_s_priority ||
- test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
- set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
- SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+
/* Note: the control handler can also be passed through the filehandle,
and that can't be tested here. If the bit for these control ioctls
is set, then the ioctl is valid. But if it is 0, then it can still
be valid if the filehandle passed the control handler. */
- if (vdev->ctrl_handler || ops->vidioc_queryctrl)
- set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
+ __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
+ __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
- set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
- set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
- SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
- set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
- set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
#endif
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
- set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
if (is_vid) {
/* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_cap_mplane ||
ops->vidioc_enum_fmt_vid_overlay)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_vid_out_mplane)))
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
ops->vidioc_g_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
ops->vidioc_s_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
ops->vidioc_try_fmt_vid_out_overlay)))
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
@@ -640,27 +653,77 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
- } else if (is_vbi) {
+ if (ops->vidioc_g_selection &&
+ !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) {
+ __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ }
+ if (ops->vidioc_s_selection &&
+ !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls))
+ __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_g_fmt_vbi_out ||
ops->vidioc_g_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vbi_cap ||
ops->vidioc_s_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_s_fmt_vbi_out ||
ops->vidioc_s_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vbi_cap ||
ops->vidioc_try_fmt_sliced_vbi_cap)) ||
(is_tx && (ops->vidioc_try_fmt_vbi_out ||
ops->vidioc_try_fmt_sliced_vbi_out)))
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ } else if (is_sdr && is_rx) {
+ /* SDR receiver specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
+ } else if (is_sdr && is_tx) {
+ /* SDR transmitter specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
- if (!is_radio) {
- /* ioctls valid for video or vbi */
+
+ if (has_streaming) {
+ /* ioctls valid for streaming I/O */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -668,95 +731,186 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */
+ if (ops->vidioc_create_bufs)
+ SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs);
+ }
+
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
- set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
if (is_rx) {
SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
- SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
- SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
- SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ if (is_io_mc) {
+ __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ }
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
}
if (is_tx) {
- SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
- SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
- SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ if (is_io_mc) {
+ __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ }
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_crop || ops->vidioc_g_selection)
- set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
- if (ops->vidioc_s_crop || ops->vidioc_s_selection)
- set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
- SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- if (ops->vidioc_cropcap || ops->vidioc_g_selection)
- set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
- if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- ops->vidioc_g_std))
- set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
+ __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
}
- if (is_tx) {
- /* transmitter only ioctls */
+ if (is_tx && (is_radio || is_sdr)) {
+ /* radio transmitter only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
- if (is_rx) {
+ if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
}
+ if (is_edid) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
+ if (is_tx) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ }
+ if (is_rx) {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
+ }
+ }
bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
BASE_VIDIOC_PRIVATE);
}
-/**
- * __video_register_device - register video4linux devices
- * @vdev: video device structure we want to register
- * @type: type of device to register
- * @nr: which device node number (0 == /dev/video0, 1 == /dev/video1, ...
- * -1 == first free)
- * @warn_if_nr_in_use: warn if the desired device node number
- * was already in use and another number was chosen instead.
- * @owner: module that owns the video device node
- *
- * The registration code assigns minor numbers and device node numbers
- * based on the requested type and registers the new device node with
- * the kernel.
- *
- * This function assumes that struct video_device was zeroed when it
- * was allocated and does not contain any stale date.
- *
- * An error is returned if no free minor or device node number could be
- * found, or if the registration of the device node failed.
- *
- * Zero is returned on success.
- *
- * Valid types are
- *
- * %VFL_TYPE_GRABBER - A frame grabber
- *
- * %VFL_TYPE_VBI - Vertical blank data (undecoded)
- *
- * %VFL_TYPE_RADIO - A radio card
- *
- * %VFL_TYPE_SUBDEV - A subdevice
- */
-int __video_register_device(struct video_device *vdev, int type, int nr,
- int warn_if_nr_in_use, struct module *owner)
+static int video_register_media_controller(struct video_device *vdev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ u32 intf_type;
+ int ret;
+
+ /* Memory-to-memory devices are more complex and use
+ * their own function to register their mc entities.
+ */
+ if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
+ return 0;
+
+ vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
+ vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_VIDEO:
+ intf_type = MEDIA_INTF_T_V4L_VIDEO;
+ vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+ break;
+ case VFL_TYPE_VBI:
+ intf_type = MEDIA_INTF_T_V4L_VBI;
+ vdev->entity.function = MEDIA_ENT_F_IO_VBI;
+ break;
+ case VFL_TYPE_SDR:
+ intf_type = MEDIA_INTF_T_V4L_SWRADIO;
+ vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO;
+ break;
+ case VFL_TYPE_TOUCH:
+ intf_type = MEDIA_INTF_T_V4L_TOUCH;
+ vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+ break;
+ case VFL_TYPE_RADIO:
+ intf_type = MEDIA_INTF_T_V4L_RADIO;
+ /*
+ * Radio doesn't have an entity at the V4L2 side to represent
+ * radio input or output. Instead, the audio input/output goes
+ * via either physical wires or ALSA.
+ */
+ break;
+ case VFL_TYPE_SUBDEV:
+ intf_type = MEDIA_INTF_T_V4L_SUBDEV;
+ /* Entity will be created via v4l2_device_register_subdev() */
+ break;
+ default:
+ return 0;
+ }
+
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
+ vdev->entity.name = vdev->name;
+
+ /* Needed just for backward compatibility with legacy MC API */
+ vdev->entity.info.dev.major = VIDEO_MAJOR;
+ vdev->entity.info.dev.minor = vdev->minor;
+
+ ret = media_device_register_entity(vdev->v4l2_dev->mdev,
+ &vdev->entity);
+ if (ret < 0) {
+ pr_warn("%s: media_device_register_entity failed\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev,
+ intf_type,
+ 0, VIDEO_MAJOR,
+ vdev->minor);
+ if (!vdev->intf_devnode) {
+ media_device_unregister_entity(&vdev->entity);
+ return -ENOMEM;
+ }
+
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
+ struct media_link *link;
+
+ link = media_create_intf_link(&vdev->entity,
+ &vdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (!link) {
+ media_devnode_remove(vdev->intf_devnode);
+ media_device_unregister_entity(&vdev->entity);
+ return -ENOMEM;
+ }
+ }
+
+ /* FIXME: how to create the other interface links? */
+
+#endif
+ return 0;
+}
+
+int __video_register_device(struct video_device *vdev,
+ enum vfl_devnode_type type,
+ int nr, int warn_if_nr_in_use,
+ struct module *owner)
{
int i = 0;
int ret;
@@ -774,6 +928,12 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* the v4l2_dev pointer MUST be present */
if (WARN_ON(!vdev->v4l2_dev))
return -EINVAL;
+ /* the device_caps field MUST be set for all but subdevs */
+ if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
+ return -EINVAL;
+ /* the open and release file operations are mandatory */
+ if (WARN_ON(!vdev->fops || !vdev->fops->open || !vdev->fops->release))
+ return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
@@ -781,7 +941,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Part 1: check device type */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
name_base = "video";
break;
case VFL_TYPE_VBI:
@@ -793,8 +953,15 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
case VFL_TYPE_SUBDEV:
name_base = "v4l-subdev";
break;
+ case VFL_TYPE_SDR:
+ /* Use device name 'swradio' because 'sdr' was already taken. */
+ name_base = "swradio";
+ break;
+ case VFL_TYPE_TOUCH:
+ name_base = "v4l-touch";
+ break;
default:
- printk(KERN_ERR "%s called with unknown type: %d\n",
+ pr_err("%s called with unknown type: %d\n",
__func__, type);
return -EINVAL;
}
@@ -818,7 +985,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
* of 128-191 and just pick the first free minor there
* (new style). */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
minor_offset = 0;
minor_cnt = 64;
break;
@@ -843,7 +1010,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
if (nr == minor_cnt)
nr = devnode_find(vdev, 0, minor_cnt);
if (nr == minor_cnt) {
- printk(KERN_ERR "could not get a free device node number\n");
+ pr_err("could not get a free device node number\n");
mutex_unlock(&videodev_lock);
return -ENFILE;
}
@@ -854,21 +1021,26 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* The device node number and minor numbers are independent, so
we just find the first free minor number. */
for (i = 0; i < VIDEO_NUM_DEVICES; i++)
- if (video_device[i] == NULL)
+ if (video_devices[i] == NULL)
break;
if (i == VIDEO_NUM_DEVICES) {
mutex_unlock(&videodev_lock);
- printk(KERN_ERR "could not get a free minor\n");
+ pr_err("could not get a free minor\n");
return -ENFILE;
}
#endif
vdev->minor = i + minor_offset;
vdev->num = nr;
- devnode_set(vdev);
/* Should not happen since we thought this minor was free */
- WARN_ON(video_device[vdev->minor] != NULL);
+ if (WARN_ON(video_devices[vdev->minor])) {
+ mutex_unlock(&videodev_lock);
+ pr_err("video_device not empty!\n");
+ return -ENFILE;
+ }
+ devnode_set(vdev);
vdev->index = get_index(vdev);
+ video_devices[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
if (vdev->ioctl_ops)
@@ -884,7 +1056,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->cdev->owner = owner;
ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
if (ret < 0) {
- printk(KERN_ERR "%s: cdev_add failed\n", __func__);
+ pr_err("%s: cdev_add failed\n", __func__);
kfree(vdev->cdev);
vdev->cdev = NULL;
goto cleanup;
@@ -894,44 +1066,30 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
+ vdev->dev.release = v4l2_device_release;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+
+ /* Increase v4l2_device refcount */
+ v4l2_device_get(vdev->v4l2_dev);
+
+ mutex_lock(&videodev_lock);
ret = device_register(&vdev->dev);
if (ret < 0) {
- printk(KERN_ERR "%s: device_register failed\n", __func__);
- goto cleanup;
+ mutex_unlock(&videodev_lock);
+ pr_err("%s: device_register failed\n", __func__);
+ put_device(&vdev->dev);
+ return ret;
}
- /* Register the release callback that will be called when the last
- reference to the device goes away. */
- vdev->dev.release = v4l2_device_release;
if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
- printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
+ pr_warn("%s: requested %s%d, got %s\n", __func__,
name_base, nr, video_device_node_name(vdev));
- /* Increase v4l2_device refcount */
- if (vdev->v4l2_dev)
- v4l2_device_get(vdev->v4l2_dev);
-
-#if defined(CONFIG_MEDIA_CONTROLLER)
/* Part 5: Register the entity. */
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV) {
- vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
- vdev->entity.name = vdev->name;
- vdev->entity.info.v4l.major = VIDEO_MAJOR;
- vdev->entity.info.v4l.minor = vdev->minor;
- ret = media_device_register_entity(vdev->v4l2_dev->mdev,
- &vdev->entity);
- if (ret < 0)
- printk(KERN_WARNING
- "%s: media_device_register_entity failed\n",
- __func__);
- }
-#endif
+ ret = video_register_media_controller(vdev);
+
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
- mutex_lock(&videodev_lock);
- video_device[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
return 0;
@@ -940,6 +1098,7 @@ cleanup:
mutex_lock(&videodev_lock);
if (vdev->cdev)
cdev_del(vdev->cdev);
+ video_devices[vdev->minor] = NULL;
devnode_clear(vdev);
mutex_unlock(&videodev_lock);
/* Mark this video device as never having been registered. */
@@ -967,10 +1126,93 @@ void video_unregister_device(struct video_device *vdev)
*/
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
+ v4l2_event_wake_all(vdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
+#ifdef CONFIG_DEBUG_FS
+struct dentry *v4l2_debugfs_root(void)
+{
+ if (!v4l2_debugfs_root_dir)
+ v4l2_debugfs_root_dir = debugfs_create_dir("v4l2", NULL);
+ return v4l2_debugfs_root_dir;
+}
+EXPORT_SYMBOL_GPL(v4l2_debugfs_root);
+#endif
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_start);
+
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return __media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_start);
+
+void video_device_pipeline_stop(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return;
+
+ return media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_stop);
+
+void __video_device_pipeline_stop(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return;
+
+ return __media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_stop);
+
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return media_pipeline_alloc_start(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start);
+
+struct media_pipeline *video_device_pipeline(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return NULL;
+
+ return media_pad_pipeline(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline);
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
/*
* Initialise video for linux
*/
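The new video_device_pipeline_*() wrappers above operate on the video node's single media pad, so they are only usable on entities with exactly one pad. A short sketch of how a streaming driver might call them follows; the surrounding start/stop context is assumed, not taken from this patch.

/* Sketch only: pipeline handling for a single-pad video node. */
#include <media/media-entity.h>
#include <media/v4l2-dev.h>

static int example_start_streaming(struct video_device *vdev,
				   struct media_pipeline *pipe)
{
	/* Returns -ENODEV if the entity does not have exactly one pad. */
	return video_device_pipeline_start(vdev, pipe);
}

static void example_stop_streaming(struct video_device *vdev)
{
	video_device_pipeline_stop(vdev);
}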
@@ -979,10 +1221,10 @@ static int __init videodev_init(void)
dev_t dev = MKDEV(VIDEO_MAJOR, 0);
int ret;
- printk(KERN_INFO "Linux video capture interface: v2.00\n");
+ pr_info("Linux video capture interface: v2.00\n");
ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME);
if (ret < 0) {
- printk(KERN_WARNING "videodev: unable to get major %d\n",
+ pr_warn("videodev: unable to get major %d\n",
VIDEO_MAJOR);
return ret;
}
@@ -990,7 +1232,7 @@ static int __init videodev_init(void)
ret = class_register(&video_class);
if (ret < 0) {
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
- printk(KERN_WARNING "video_dev: class_register failed\n");
+ pr_warn("video_dev: class_register failed\n");
return -EIO;
}
@@ -1003,19 +1245,14 @@ static void __exit videodev_exit(void)
class_unregister(&video_class);
unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
+ debugfs_remove_recursive(v4l2_debugfs_root_dir);
+ v4l2_debugfs_root_dir = NULL;
}
subsys_initcall(videodev_init);
module_exit(videodev_exit)
-MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
-MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>, Bill Dirks, Justin Schoeman, Gerd Knorr");
+MODULE_DESCRIPTION("Video4Linux2 core driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
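After this rework __video_register_device() takes an enum vfl_devnode_type and rejects nodes that lack device_caps (for anything but sub-devices) or the now-mandatory open/release file operations, and VFL_TYPE_GRABBER becomes VFL_TYPE_VIDEO. The sketch below shows what a capture driver's registration path might look like under these rules; the fops/ioctl_ops pointers, the release helper and the capability set are illustrative assumptions.

/* Sketch only: registering a video capture node against the reworked core. */
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

static int example_register_node(struct video_device *vdev,
				 struct v4l2_device *v4l2_dev,
				 const struct v4l2_file_operations *fops,
				 const struct v4l2_ioctl_ops *ioctl_ops)
{
	vdev->v4l2_dev = v4l2_dev;	/* the v4l2_dev pointer MUST be present */
	vdev->fops = fops;		/* .open and .release are mandatory */
	vdev->ioctl_ops = ioctl_ops;
	vdev->release = video_device_release_empty;
	/* device_caps is now required for everything except sub-devices. */
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;

	/* VFL_TYPE_VIDEO replaces the old VFL_TYPE_GRABBER name. */
	return __video_register_device(vdev, VFL_TYPE_VIDEO, -1, 1, fops->owner);
}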
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 02d1b6327117..63b12ef9d4d9 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -1,31 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
V4L2 device support.
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/module.h>
-#include <linux/i2c.h>
#include <linux/slab.h>
-#if defined(CONFIG_SPI)
-#include <linux/spi/spi.h>
-#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -37,7 +21,6 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
INIT_LIST_HEAD(&v4l2_dev->subdevs);
spin_lock_init(&v4l2_dev->lock);
- mutex_init(&v4l2_dev->ioctl_lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
get_device(dev);
@@ -115,73 +98,62 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
-#if IS_ENABLED(CONFIG_I2C)
- if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- /* We need to unregister the i2c client explicitly.
- We cannot rely on i2c_del_adapter to always
- unregister clients for us, since if the i2c bus
- is a platform bus, then it is never deleted. */
- if (client)
- i2c_unregister_device(client);
- continue;
- }
-#endif
-#if defined(CONFIG_SPI)
- if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
- struct spi_device *spi = v4l2_get_subdevdata(sd);
-
- if (spi)
- spi_unregister_device(spi);
- continue;
- }
-#endif
+ if (sd->flags & V4L2_SUBDEV_FL_IS_I2C)
+ v4l2_i2c_subdev_unregister(sd);
+ else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI)
+ v4l2_spi_subdev_unregister(sd);
}
/* Mark as unregistered, thus preventing duplicate unregistrations */
v4l2_dev->name[0] = '\0';
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
-int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
- struct v4l2_subdev *sd)
+int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd, struct module *module)
{
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = &sd->entity;
-#endif
int err;
/* Check for valid input */
- if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
+ if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0])
return -EINVAL;
- /* Warn if we apparently re-register a subdev */
- WARN_ON(sd->v4l2_dev != NULL);
+ /*
+ * The module is acquired here to prevent a sub-device module from
+ * being unloaded while it is registered to a media device. To still
+ * allow unloading modules of media devices that also register their
+ * own sub-devices, do not try_module_get() such sub-device owners.
+ */
+ sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
+ module == v4l2_dev->dev->driver->owner;
- if (!try_module_get(sd->owner))
+ if (!sd->owner_v4l2_dev && !try_module_get(module))
return -ENODEV;
sd->v4l2_dev = v4l2_dev;
- if (sd->internal_ops && sd->internal_ops->registered) {
- err = sd->internal_ops->registered(sd);
- if (err)
- goto error_module;
- }
-
/* This just returns 0 if either of the two args is NULL */
- err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (err)
- goto error_unregister;
+ goto error_module;
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
- err = media_device_register_entity(v4l2_dev->mdev, entity);
+ err = media_device_register_entity(v4l2_dev->mdev, &sd->entity);
if (err < 0)
- goto error_unregister;
+ goto error_module;
}
#endif
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+ if (err)
+ goto error_unregister;
+ }
+
+ sd->owner = module;
+
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
spin_unlock(&v4l2_dev->lock);
@@ -189,23 +161,35 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
return 0;
error_unregister:
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_device_unregister_entity(&sd->entity);
+#endif
error_module:
- module_put(sd->owner);
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
sd->v4l2_dev = NULL;
return err;
}
-EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev);
-static void v4l2_device_release_subdev_node(struct video_device *vdev)
+static void v4l2_subdev_release(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = video_get_drvdata(vdev);
+ struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL;
+
+ if (sd->internal_ops && sd->internal_ops->release)
+ sd->internal_ops->release(sd);
sd->devnode = NULL;
+ module_put(owner);
+}
+
+static void v4l2_device_release_subdev_node(struct video_device *vdev)
+{
+ v4l2_subdev_release(video_get_drvdata(vdev));
kfree(vdev);
}
-int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
+int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev,
+ bool read_only)
{
struct video_device *vdev;
struct v4l2_subdev *sd;
@@ -218,6 +202,9 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
+ if (sd->devnode)
+ continue;
+
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
@@ -225,22 +212,40 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
}
video_set_drvdata(vdev, sd);
- strlcpy(vdev->name, sd->name, sizeof(vdev->name));
+ strscpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->dev_parent = sd->dev;
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = v4l2_device_release_subdev_node;
vdev->ctrl_handler = sd->ctrl_handler;
+ if (read_only)
+ set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
+ sd->devnode = vdev;
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
if (err < 0) {
+ sd->devnode = NULL;
kfree(vdev);
goto clean_up;
}
#if defined(CONFIG_MEDIA_CONTROLLER)
- sd->entity.info.v4l.major = VIDEO_MAJOR;
- sd->entity.info.v4l.minor = vdev->minor;
+ sd->entity.info.dev.major = VIDEO_MAJOR;
+ sd->entity.info.dev.minor = vdev->minor;
+
+ /* Interface is created by __video_register_device() */
+ if (vdev->v4l2_dev->mdev) {
+ struct media_link *link;
+
+ link = media_create_intf_link(&sd->entity,
+ &vdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (!link) {
+ err = -ENOMEM;
+ goto clean_up;
+ }
+ }
#endif
- sd->devnode = vdev;
}
return 0;
@@ -253,7 +258,7 @@ clean_up:
return err;
}
-EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);
+EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes);
void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
{
@@ -275,11 +280,16 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
#if defined(CONFIG_MEDIA_CONTROLLER)
if (v4l2_dev->mdev) {
- media_entity_remove_links(&sd->entity);
+ /*
+ * No need to explicitly remove links, as both pads and
+ * links are removed by the function below, in the right order
+ */
media_device_unregister_entity(&sd->entity);
}
#endif
- video_unregister_device(sd->devnode);
- module_put(sd->owner);
+ if (sd->devnode)
+ video_unregister_device(sd->devnode);
+ else
+ v4l2_subdev_release(sd);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
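Sub-device registration now goes through __v4l2_device_register_subdev(), which records the owning module and skips try_module_get() when the sub-device belongs to the same module as the v4l2_device's driver, while device nodes are created by __v4l2_device_register_subdev_nodes() with a read-only option. A bridge-driver sketch follows; it assumes the usual v4l2_device_register_subdev() wrapper expands to the three-argument variant with THIS_MODULE, and the calling context is illustrative.

/* Sketch only: a bridge driver wiring a sub-device into its v4l2_device. */
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

static int example_attach_subdev(struct v4l2_device *v4l2_dev,
				 struct v4l2_subdev *sd)
{
	int ret;

	/* Assumed to expand to __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE). */
	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret)
		return ret;

	/*
	 * Create /dev/v4l-subdevN nodes for every registered sub-device that
	 * set V4L2_SUBDEV_FL_HAS_DEVNODE; passing true makes them read-only.
	 */
	return __v4l2_device_register_subdev_nodes(v4l2_dev, false);
}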
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
new file mode 100644
index 000000000000..346d1b0e10ce
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -0,0 +1,1275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * v4l2-dv-timings - dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/rational.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+#include <linux/math64.h>
+#include <linux/hdmi.h>
+#include <media/cec.h>
+
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
+MODULE_LICENSE("GPL");
+
+const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_800X600P120_RB,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768I43,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1024X768P120_RB,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X768P120_RB,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X800P120_RB,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X960P120_RB,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1280X1024P120_RB,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1360X768P120_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1400X1050P120_RB,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1440X900P75,
+ V4L2_DV_BT_DMT_1440X900P85,
+ V4L2_DV_BT_DMT_1440X900P120_RB,
+ V4L2_DV_BT_DMT_1600X900P60_RB,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1600X1200P65,
+ V4L2_DV_BT_DMT_1600X1200P70,
+ V4L2_DV_BT_DMT_1600X1200P75,
+ V4L2_DV_BT_DMT_1600X1200P85,
+ V4L2_DV_BT_DMT_1600X1200P120_RB,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1680X1050P75,
+ V4L2_DV_BT_DMT_1680X1050P85,
+ V4L2_DV_BT_DMT_1680X1050P120_RB,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1792X1344P75,
+ V4L2_DV_BT_DMT_1792X1344P120_RB,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1856X1392P75,
+ V4L2_DV_BT_DMT_1856X1392P120_RB,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1920X1200P60,
+ V4L2_DV_BT_DMT_1920X1200P75,
+ V4L2_DV_BT_DMT_1920X1200P85,
+ V4L2_DV_BT_DMT_1920X1200P120_RB,
+ V4L2_DV_BT_DMT_1920X1440P60,
+ V4L2_DV_BT_DMT_1920X1440P75,
+ V4L2_DV_BT_DMT_1920X1440P120_RB,
+ V4L2_DV_BT_DMT_2048X1152P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60,
+ V4L2_DV_BT_DMT_2560X1600P75,
+ V4L2_DV_BT_DMT_2560X1600P85,
+ V4L2_DV_BT_DMT_2560X1600P120_RB,
+ V4L2_DV_BT_CEA_3840X2160P24,
+ V4L2_DV_BT_CEA_3840X2160P25,
+ V4L2_DV_BT_CEA_3840X2160P30,
+ V4L2_DV_BT_CEA_3840X2160P50,
+ V4L2_DV_BT_CEA_3840X2160P60,
+ V4L2_DV_BT_CEA_4096X2160P24,
+ V4L2_DV_BT_CEA_4096X2160P25,
+ V4L2_DV_BT_CEA_4096X2160P30,
+ V4L2_DV_BT_CEA_4096X2160P50,
+ V4L2_DV_BT_DMT_4096X2160P59_94_RB,
+ V4L2_DV_BT_CEA_4096X2160P60,
+ { }
+};
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
+
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *dvcap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+ u32 caps = cap->capabilities;
+ const u32 max_vert = 10240;
+ u32 max_hor = 3 * bt->width;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t->type != dvcap->type ||
+ bt->height < cap->min_height ||
+ bt->height > cap->max_height ||
+ bt->width < cap->min_width ||
+ bt->width > cap->max_width ||
+ bt->pixelclock < cap->min_pixelclock ||
+ bt->pixelclock > cap->max_pixelclock ||
+ (!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
+ cap->standards && bt->standards &&
+ !(bt->standards & cap->standards)) ||
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
+
+ /* sanity checks for the blanking timings */
+ if (!bt->interlaced &&
+ (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+ return false;
+ /*
+ * Some video receivers cannot properly separate the frontporch,
+ * backporch and sync values, and instead they only have the total
+ * blanking. That can be assigned to any of these three fields.
+ * So just check that none of these are way out of range.
+ */
+ if (bt->hfrontporch > max_hor ||
+ bt->hsync > max_hor || bt->hbackporch > max_hor)
+ return false;
+ if (bt->vfrontporch > max_vert ||
+ bt->vsync > max_vert || bt->vbackporch > max_vert)
+ return false;
+ if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
+ bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
+ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+}
+EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
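The fnc/fnc_handle pair lets a caller add constraints that the v4l2_dv_timings_cap table cannot express. A minimal usage sketch, assuming a hypothetical capability table and a 16-pixel alignment restriction (none of the demo_* names are part of this patch):

#include <media/v4l2-dv-timings.h>

static const struct v4l2_dv_timings_cap demo_cap = {
	.type = V4L2_DV_BT_656_1120,
	.bt = {
		.min_width = 640,
		.max_width = 1920,
		.min_height = 480,
		.max_height = 1080,
		.min_pixelclock = 25000000,
		.max_pixelclock = 148500000,
		.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
		.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
				V4L2_DV_BT_CAP_INTERLACED,
	},
};

/* Extra constraint that the cap table cannot describe */
static bool demo_check(const struct v4l2_dv_timings *t, void *handle)
{
	return !(t->bt.width % 16);
}

static int demo_set_timings(const struct v4l2_dv_timings *t)
{
	if (!v4l2_valid_dv_timings(t, &demo_cap, demo_check, NULL))
		return -ERANGE;
	/* ... program the hardware ... */
	return 0;
}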
+
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ u32 i, idx;
+
+ memset(t->reserved, 0, sizeof(t->reserved));
+ for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ idx++ == t->index) {
+ t->timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ int i;
+
+ if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
+ return false;
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
+ pclock_delta, false)) {
+ u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS;
+
+ *t = v4l2_dv_timings_presets[i];
+ if (can_reduce_fps(&t->bt))
+ t->bt.flags |= flags;
+
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
+{
+ unsigned int i;
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt =
+ &v4l2_dv_timings_presets[i].bt;
+
+ if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
+ bt->cea861_vic == vic) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
+
+/**
+ * v4l2_match_dv_timings - check if two timings match
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
+ * match.
+ *
+ * Compare t1 with t2 with a given margin of error for the pixelclock.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
+ const struct v4l2_dv_timings *t2,
+ unsigned pclock_delta, bool match_reduced_fps)
+{
+ if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t1->bt.width == t2->bt.width &&
+ t1->bt.height == t2->bt.height &&
+ t1->bt.interlaced == t2->bt.interlaced &&
+ t1->bt.polarities == t2->bt.polarities &&
+ t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
+ t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
+ t1->bt.hfrontporch == t2->bt.hfrontporch &&
+ t1->bt.hsync == t2->bt.hsync &&
+ t1->bt.hbackporch == t2->bt.hbackporch &&
+ t1->bt.vfrontporch == t2->bt.vfrontporch &&
+ t1->bt.vsync == t2->bt.vsync &&
+ t1->bt.vbackporch == t2->bt.vbackporch &&
+ (!match_reduced_fps ||
+ (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) ==
+ (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) &&
+ (!t1->bt.interlaced ||
+ (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
+ t1->bt.il_vsync == t2->bt.il_vsync &&
+ t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
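A typical caller uses this to decide whether newly detected timings really differ from the currently configured ones before reacting, tolerating a small pixel clock drift; a sketch (the 0.5% margin is an arbitrary example, not from this patch):

static bool demo_timings_changed(const struct v4l2_dv_timings *cur,
				 const struct v4l2_dv_timings *detected)
{
	/* Tolerate ~0.5% pixel clock deviation before calling it a new mode */
	unsigned int delta = div_u64(cur->bt.pixelclock, 200);

	return !v4l2_match_dv_timings(cur, detected, delta, false);
}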
+
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ u32 htot, vtot;
+ u32 fps;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced)
+ vtot /= 2;
+
+ fps = (htot * vtot) > 0 ? div_u64((100 * (u64)bt->pixelclock),
+ (htot * vtot)) : 0;
+
+ if (prefix == NULL)
+ prefix = "";
+
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ fps / 100, fps % 100, htot, vtot);
+
+ if (!detailed)
+ return;
+
+ pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->hfrontporch,
+ (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
+ bt->hsync, bt->hbackporch);
+ pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->vsync, bt->vbackporch);
+ if (bt->interlaced)
+ pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->il_vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->il_vsync, bt->il_vbackporch);
+ pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
+ dev_prefix, bt->flags,
+ (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
+ " REDUCED_BLANKING" : "",
+ ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
+ bt->vsync == 8) ? " (V2)" : "",
+ (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
+ " CAN_REDUCE_FPS" : "",
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
+ " REDUCED_FPS" : "",
+ (bt->flags & V4L2_DV_FL_HALF_LINE) ?
+ " HALF_LINE" : "",
+ (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
+ " CE_VIDEO" : "",
+ (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
+ " FIRST_FIELD_EXTRA_LINE" : "",
+ (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
+ " HAS_PICTURE_ASPECT" : "",
+ (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
+ " HAS_CEA861_VIC" : "",
+ (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
+ " HAS_HDMI_VIC" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
+ (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
+ (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
+ (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
+ (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
+ if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
+ pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
+ bt->picture_aspect.numerator,
+ bt->picture_aspect.denominator);
+ if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
+ pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
+ if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
+ pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
+}
+EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
+{
+ struct v4l2_fract ratio = { 1, 1 };
+ unsigned long n, d;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return ratio;
+ if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
+ return ratio;
+
+ ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
+ ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
+
+ rational_best_approximation(ratio.numerator, ratio.denominator,
+ ratio.numerator, ratio.denominator, &n, &d);
+ ratio.numerator = n;
+ ratio.denominator = d;
+ return ratio;
+}
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+
+/** v4l2_calc_timeperframe - helper function to calculate timeperframe based
+ *	on v4l2_dv_timings fields.
+ * @t - Timings for the video mode.
+ *
+ * Calculates the expected timeperframe using the pixel clock value and
+ * horizontal/vertical measures. This means that v4l2_dv_timings structure
+ * must be correctly and fully filled.
+ */
+struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ struct v4l2_fract fps_fract = { 1, 1 };
+ unsigned long n, d;
+ u32 htot, vtot, fps;
+ u64 pclk;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return fps_fract;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ pclk = bt->pixelclock;
+
+ if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) &&
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS))
+ pclk = div_u64(pclk * 1000ULL, 1001);
+
+ fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0;
+ if (!fps)
+ return fps_fract;
+
+ rational_best_approximation(fps, 100, fps, 100, &n, &d);
+
+ fps_fract.numerator = d;
+ fps_fract.denominator = n;
+ return fps_fract;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe);
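As a worked example, CEA-861 1080p60 has htot = 1920 + 88 + 44 + 148 = 2200, vtot = 1080 + 4 + 5 + 36 = 1125 and a 148.5 MHz pixel clock, so fps = 148500000 / (2200 * 1125) = 60 and the helper returns a timeperframe of 1/60. When both V4L2_DV_FL_CAN_DETECT_REDUCED_FPS and V4L2_DV_FL_REDUCED_FPS are set, the pixel clock is first scaled by 1000/1001, giving the ~59.94 Hz rate instead.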
+
+/*
+ * CVT defines
+ * Based on Coordinated Video Timings Standard
+ * version 1.1 September 10, 2003
+ */
+
+#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+#define CVT_PXL_CLK_GRAN_RB_V2	1000	/* granularity for reduced blanking v2 */
+
+/* Normal blanking */
+#define CVT_MIN_V_BPORCH 7 /* lines */
+#define CVT_MIN_V_PORCH_RND 3 /* lines */
+#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */
+
+/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
+#define CVT_CELL_GRAN 8 /* character cell granularity */
+#define CVT_M 600 /* blanking formula gradient */
+#define CVT_C 40 /* blanking formula offset */
+#define CVT_K 128 /* blanking formula scaling factor */
+#define CVT_J 20 /* blanking formula scaling factor */
+#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
+#define CVT_M_PRIME (CVT_K * CVT_M / 256)
+
+/* Reduced Blanking */
+#define CVT_RB_MIN_V_BPORCH 7 /* lines */
+#define CVT_RB_V_FPORCH 3 /* lines */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_H_SYNC 32 /* pixels */
+#define CVT_RB_H_BLANK 160 /* pixels */
+/* Reduced Blanking Version 2 */
+#define CVT_RB_V2_H_BLANK 80 /* pixels */
+#define CVT_RB_MIN_V_FPORCH 3 /* lines */
+#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */
+#define CVT_RB_V_BPORCH 6 /* lines */
+
+/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of image (does not include blanking). This
+ * information is needed only in case of version 2 of reduced blanking.
+ * In other cases, this parameter does not have any effect on timings.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced - if this flag is true, it indicates interlaced format
+ * @cap - the v4l2_dv_timings_cap capabilities.
+ * @timings - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and the timings
+ * argument will be filled in with the found CVT timings.
+ */
+bool v4l2_detect_cvt(unsigned int frame_height,
+ unsigned int hfreq,
+ unsigned int vsync,
+ unsigned int active_width,
+ u32 polarities,
+ bool interlaced,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_dv_timings t = {};
+ int v_fp, v_bp, h_fp, h_bp, hsync;
+ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ bool rb_v2 = false;
+ unsigned int pix_clk;
+
+ if (vsync < 4 || vsync > 8)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ reduced_blanking = false;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ reduced_blanking = true;
+ else
+ return false;
+
+ if (reduced_blanking && vsync == 8)
+ rb_v2 = true;
+
+ if (rb_v2 && active_width == 0)
+ return false;
+
+ if (!rb_v2 && vsync > 7)
+ return false;
+
+ if (hfreq == 0)
+ return false;
+
+ /* Vertical */
+ if (reduced_blanking) {
+ if (rb_v2) {
+ v_bp = CVT_RB_V_BPORCH;
+ v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_fp -= vsync + v_bp;
+
+ if (v_fp < CVT_RB_V2_MIN_V_FPORCH)
+ v_fp = CVT_RB_V2_MIN_V_FPORCH;
+ } else {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ }
+ } else {
+ v_fp = CVT_MIN_V_PORCH_RND;
+ v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync;
+
+ if (v_bp < CVT_MIN_V_BPORCH)
+ v_bp = CVT_MIN_V_BPORCH;
+ }
+
+ if (interlaced)
+ image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
+ else
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (image_height < 0)
+ return false;
+
+ /* Aspect ratio based on vsync */
+ switch (vsync) {
+ case 4:
+ image_width = (image_height * 4) / 3;
+ break;
+ case 5:
+ image_width = (image_height * 16) / 9;
+ break;
+ case 6:
+ image_width = (image_height * 16) / 10;
+ break;
+ case 7:
+ /* special case */
+ if (image_height == 1024)
+ image_width = (image_height * 5) / 4;
+ else if (image_height == 768)
+ image_width = (image_height * 15) / 9;
+ else
+ return false;
+ break;
+ case 8:
+ image_width = active_width;
+ break;
+ default:
+ return false;
+ }
+
+ if (!rb_v2)
+ image_width = image_width & ~7;
+
+ /* Horizontal */
+ if (reduced_blanking) {
+ int h_blank;
+ int clk_gran;
+
+ h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK;
+ clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / clk_gran) * clk_gran;
+
+ h_bp = h_blank / 2;
+ hsync = CVT_RB_H_SYNC;
+ h_fp = h_blank - h_bp - hsync;
+
+ frame_width = image_width + h_blank;
+ } else {
+ unsigned ideal_duty_cycle_per_myriad =
+ 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
+ int h_blank;
+
+ if (ideal_duty_cycle_per_myriad < 2000)
+ ideal_duty_cycle_per_myriad = 2000;
+
+ h_blank = image_width * ideal_duty_cycle_per_myriad /
+ (10000 - ideal_duty_cycle_per_myriad);
+ h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = h_blank / 2;
+ frame_width = image_width + h_blank;
+
+ hsync = frame_width * CVT_HSYNC_PERCENT / 100;
+ hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN;
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+ t.type = V4L2_DV_BT_656_1120;
+ t.bt.polarities = polarities;
+ t.bt.width = image_width;
+ t.bt.height = image_height;
+ t.bt.hfrontporch = h_fp;
+ t.bt.vfrontporch = v_fp;
+ t.bt.hsync = hsync;
+ t.bt.vsync = vsync;
+ t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+ t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+ t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+ t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+ 2 * vsync - t.bt.vbackporch;
+ t.bt.il_vfrontporch = v_fp;
+ t.bt.il_vsync = vsync;
+ t.bt.flags |= V4L2_DV_FL_HALF_LINE;
+ t.bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+ t.bt.pixelclock = pix_clk;
+ t.bt.standards = V4L2_DV_BT_STD_CVT;
+
+ if (reduced_blanking)
+ t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
+ if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
+ return false;
+ *timings = t;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+
+/*
+ * GTF defines
+ * Based on Generalized Timing Formula Standard
+ * Version 1.1 September 2, 1999
+ */
+
+#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define GTF_V_FP 1 /* vertical front porch (lines) */
+#define GTF_CELL_GRAN 8 /* character cell granularity */
+
+/* Default */
+#define GTF_D_M 600 /* blanking formula gradient */
+#define GTF_D_C 40 /* blanking formula offset */
+#define GTF_D_K 128 /* blanking formula scaling factor */
+#define GTF_D_J 20 /* blanking formula scaling factor */
+#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
+#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
+
+/* Secondary */
+#define GTF_S_M 3600 /* blanking formula gradient */
+#define GTF_S_C 40 /* blanking formula offset */
+#define GTF_S_K 128 /* blanking formula scaling factor */
+#define GTF_S_J 35 /* blanking formula scaling factor */
+#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
+#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
+
+/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced - if this flag is true, it indicates interlaced format
+ * @aspect - preferred aspect ratio. GTF has no method of determining the
+ * aspect ratio in order to derive the image width from the
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+ * @cap - the v4l2_dv_timings_cap capabilities.
+ * @timings - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and the timings
+ * argument will be filled in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned int frame_height,
+ unsigned int hfreq,
+ unsigned int vsync,
+ u32 polarities,
+ bool interlaced,
+ struct v4l2_fract aspect,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_dv_timings t = {};
+ int pix_clk;
+ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+
+ if (vsync != 3)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ default_gtf = true;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ default_gtf = false;
+ else
+ return false;
+
+ if (hfreq == 0)
+ return false;
+
+ /* Vertical */
+ v_fp = GTF_V_FP;
+ v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync;
+ if (interlaced)
+ image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
+ else
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (image_height < 0)
+ return false;
+
+ if (aspect.numerator == 0 || aspect.denominator == 0) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ }
+ image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
+
+ /* Horizontal */
+ if (default_gtf) {
+ u64 num;
+ u32 den;
+
+ num = (((u64)image_width * GTF_D_C_PRIME * hfreq) -
+ ((u64)image_width * GTF_D_M_PRIME * 1000));
+ den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
+ (2 * GTF_CELL_GRAN);
+ h_blank = div_u64((num + (den >> 1)), den);
+ h_blank *= (2 * GTF_CELL_GRAN);
+ } else {
+ u64 num;
+ u32 den;
+
+ num = (((u64)image_width * GTF_S_C_PRIME * hfreq) -
+ ((u64)image_width * GTF_S_M_PRIME * 1000));
+ den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
+ (2 * GTF_CELL_GRAN);
+ h_blank = div_u64((num + (den >> 1)), den);
+ h_blank *= (2 * GTF_CELL_GRAN);
+ }
+
+ frame_width = image_width + h_blank;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
+
+ h_fp = h_blank / 2 - hsync;
+
+ t.type = V4L2_DV_BT_656_1120;
+ t.bt.polarities = polarities;
+ t.bt.width = image_width;
+ t.bt.height = image_height;
+ t.bt.hfrontporch = h_fp;
+ t.bt.vfrontporch = v_fp;
+ t.bt.hsync = hsync;
+ t.bt.vsync = vsync;
+ t.bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+ t.bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ t.bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+ t.bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+ t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+ 2 * vsync - t.bt.vbackporch;
+ t.bt.il_vfrontporch = v_fp;
+ t.bt.il_vsync = vsync;
+ t.bt.flags |= V4L2_DV_FL_HALF_LINE;
+ t.bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+ t.bt.pixelclock = pix_clk;
+ t.bt.standards = V4L2_DV_BT_STD_GTF;
+
+ if (!default_gtf)
+ t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
+ if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL))
+ return false;
+ *timings = t;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
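Receivers typically measure frame_height, hfreq, the vsync width and the sync polarities from the incoming signal and then try CVT before falling back to GTF; a rough sketch of such a caller (the function name and the 16:9 fallback aspect are illustrative):

static bool demo_detect(unsigned int frame_height, unsigned int hfreq,
			unsigned int vsync, unsigned int active_width,
			u32 polarities, bool interlaced,
			const struct v4l2_dv_timings_cap *cap,
			struct v4l2_dv_timings *timings)
{
	struct v4l2_fract aspect = { 16, 9 };

	if (v4l2_detect_cvt(frame_height, hfreq, vsync, active_width,
			    polarities, interlaced, cap, timings))
		return true;
	return v4l2_detect_gtf(frame_height, hfreq, vsync, polarities,
			       interlaced, aspect, cap, timings);
}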
+
+/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ * 0x15 and 0x16 from the EDID.
+ * @hor_landscape - byte 0x15 from the EDID.
+ * @vert_portrait - byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+{
+ struct v4l2_fract aspect = { 16, 9 };
+ u8 ratio;
+
+ /* Nothing filled in, fallback to 16:9 */
+ if (!hor_landscape && !vert_portrait)
+ return aspect;
+ /* Both filled in, so they are interpreted as the screen size in cm */
+ if (hor_landscape && vert_portrait) {
+ aspect.numerator = hor_landscape;
+ aspect.denominator = vert_portrait;
+ return aspect;
+ }
+ /* Only one is filled in, so interpret them as a ratio:
+ (val + 99) / 100 */
+ ratio = hor_landscape | vert_portrait;
+ /* Change some rounded values into the exact aspect ratio */
+ if (ratio == 79) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ } else if (ratio == 34) {
+ aspect.numerator = 4;
+ aspect.denominator = 3;
+ } else if (ratio == 68) {
+ aspect.numerator = 15;
+ aspect.denominator = 9;
+ } else {
+ aspect.numerator = hor_landscape + 99;
+ aspect.denominator = 100;
+ }
+ if (hor_landscape)
+ return aspect;
+ /* The aspect ratio is for portrait, so swap numerator and denominator */
+ swap(aspect.denominator, aspect.numerator);
+ return aspect;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
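For example, an EDID that only advertises an aspect ratio stores 0x4f (79) in byte 0x15 and 0 in byte 0x16 for a 16:9 landscape display; the helper maps 79 to the exact 16:9 rather than the rounded (79 + 99)/100. A 60 cm x 34 cm panel that fills in both bytes (0x3c and 0x22) simply yields 60:34.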
+
+/** v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information
+ * based on various InfoFrames.
+ * @avi: the AVI InfoFrame
+ * @hdmi: the HDMI Vendor InfoFrame, may be NULL
+ * @height: the frame height
+ *
+ * Determines the HDMI colorimetry information, i.e. how the HDMI
+ * pixel color data should be interpreted.
+ *
+ * Note that some of the newer features (DCI-P3, HDR) are not yet
+ * implemented: the hdmi.h header needs to be updated to the HDMI 2.0
+ * and CTA-861-G standards.
+ */
+struct v4l2_hdmi_colorimetry
+v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
+ const struct hdmi_vendor_infoframe *hdmi,
+ unsigned int height)
+{
+ struct v4l2_hdmi_colorimetry c = {
+ V4L2_COLORSPACE_SRGB,
+ V4L2_YCBCR_ENC_DEFAULT,
+ V4L2_QUANTIZATION_FULL_RANGE,
+ V4L2_XFER_FUNC_SRGB
+ };
+ bool is_ce = avi->video_code || (hdmi && hdmi->vic);
+ bool is_sdtv = height <= 576;
+ bool default_is_lim_range_rgb = avi->video_code > 1;
+
+ switch (avi->colorspace) {
+ case HDMI_COLORSPACE_RGB:
+ /* RGB pixel encoding */
+ switch (avi->colorimetry) {
+ case HDMI_COLORIMETRY_EXTENDED:
+ switch (avi->extended_colorimetry) {
+ case HDMI_EXTENDED_COLORIMETRY_OPRGB:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ switch (avi->quantization_range) {
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ break;
+ default:
+ if (default_is_lim_range_rgb)
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ }
+ break;
+
+ default:
+ /* YCbCr pixel encoding */
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ switch (avi->colorimetry) {
+ case HDMI_COLORIMETRY_NONE:
+ if (!is_ce)
+ break;
+ if (is_sdtv) {
+ c.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ } else {
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ }
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_ITU_601:
+ c.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_ITU_709:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_EXTENDED:
+ switch (avi->extended_colorimetry) {
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+				c.ycbcr_enc = V4L2_YCBCR_ENC_XV601;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+				c.ycbcr_enc = V4L2_YCBCR_ENC_XV709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
+ c.colorspace = V4L2_COLORSPACE_SRGB;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_SRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ default: /* fall back to ITU_709 */
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /*
+ * YCC Quantization Range signaling is more-or-less broken,
+ * let's just ignore this.
+ */
+ break;
+ }
+ return c;
+}
+EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
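An HDMI receiver driver would typically call this after parsing the incoming AVI InfoFrame and copy the result into its active format; a rough sketch, where struct demo_rx and its fmt member are hypothetical driver state:

/* Hypothetical receiver state; only the member used here is shown. */
struct demo_rx {
	struct v4l2_mbus_framefmt fmt;
};

static void demo_update_colorimetry(struct demo_rx *rx,
				    const struct hdmi_avi_infoframe *avi,
				    unsigned int height)
{
	struct v4l2_hdmi_colorimetry col =
		v4l2_hdmi_rx_colorimetry(avi, NULL, height);

	rx->fmt.colorspace = col.colorspace;
	rx->fmt.ycbcr_enc = col.ycbcr_enc;
	rx->fmt.quantization = col.quantization;
	rx->fmt.xfer_func = col.xfer_func;
}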
+
+/**
+ * v4l2_num_edid_blocks() - return the number of EDID blocks
+ *
+ * @edid: pointer to the EDID data
+ * @max_blocks: maximum number of supported EDID blocks
+ *
+ * Return: the number of EDID blocks based on the contents of the EDID.
+ * This supports the HDMI Forum EDID Extension Override Data Block.
+ */
+unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks)
+{
+ unsigned int blocks;
+
+ if (!edid || !max_blocks)
+ return 0;
+
+ // The number of extension blocks is recorded at byte 126 of the
+ // first 128-byte block in the EDID.
+ //
+ // If there is an HDMI Forum EDID Extension Override Data Block
+ // present, then it is in bytes 4-6 of the first CTA-861 extension
+ // block of the EDID.
+ blocks = edid[126] + 1;
+ // Check for HDMI Forum EDID Extension Override Data Block
+ if (blocks >= 2 && // The EDID must be at least 2 blocks
+ max_blocks >= 3 && // The caller supports at least 3 blocks
+ edid[128] == 2 && // The first extension block is type CTA-861
+ edid[133] == 0x78 && // Identifier for the EEODB
+ (edid[132] & 0xe0) == 0xe0 && // Tag Code == 7
+ (edid[132] & 0x1f) >= 2 && // Length >= 2
+ edid[134] > 1) // Number of extension blocks is sane
+ blocks = edid[134] + 1;
+ return blocks > max_blocks ? max_blocks : blocks;
+}
+EXPORT_SYMBOL_GPL(v4l2_num_edid_blocks);
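This is typically used when deciding how many 128-byte blocks of a sink's EDID to expose or read; a small sketch, with DEMO_MAX_BLOCKS as a hypothetical driver limit:

#define DEMO_MAX_BLOCKS 4

static unsigned int demo_edid_size(const u8 *edid)
{
	/* Honours the EEODB, so an EDID with more than one CTA extension works */
	return v4l2_num_edid_blocks(edid, DEMO_MAX_BLOCKS) * 128;
}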
+
+/**
+ * v4l2_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
+
+/**
+ * v4l2_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
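A common pattern is to extract the Source Physical Address whenever userspace sets a new EDID and hand it to the CEC adapter; a sketch using the standard cec_s_phys_addr() call from the CEC core:

static void demo_set_edid(struct cec_adapter *adap, const u8 *edid,
			  unsigned int blocks)
{
	u16 pa = v4l2_get_edid_phys_addr(edid, blocks * 128, NULL);

	/* block = false: don't wait for the CEC adapter to reconfigure */
	cec_s_phys_addr(adap, pa, false);
}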
+
+/**
+ * v4l2_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
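An HDMI repeater, for example, derives a per-input PA from the PA found in the upstream sink's EDID and patches it into the EDID it presents on each of its inputs; a sketch combining this with v4l2_set_edid_phys_addr() above:

static void demo_patch_input_edid(u8 *edid, unsigned int size,
				  u16 sink_pa, u8 input)
{
	u16 pa = v4l2_phys_addr_for_input(sink_pa, input);

	v4l2_set_edid_phys_addr(edid, size, pa);
}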
+
+/**
+ * v4l2_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parent's PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DEBUGFS_FOPS(type, flag) \
+static ssize_t \
+infoframe_read_##type(struct file *filp, \
+ char __user *ubuf, size_t count, loff_t *ppos) \
+{ \
+ struct v4l2_debugfs_if *infoframes = filp->private_data; \
+ \
+ return infoframes->if_read((flag), infoframes->priv, filp, \
+ ubuf, count, ppos); \
+} \
+ \
+static const struct file_operations infoframe_##type##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = infoframe_read_##type, \
+}
+
+DEBUGFS_FOPS(avi, V4L2_DEBUGFS_IF_AVI);
+DEBUGFS_FOPS(audio, V4L2_DEBUGFS_IF_AUDIO);
+DEBUGFS_FOPS(spd, V4L2_DEBUGFS_IF_SPD);
+DEBUGFS_FOPS(hdmi, V4L2_DEBUGFS_IF_HDMI);
+DEBUGFS_FOPS(drm, V4L2_DEBUGFS_IF_DRM);
+
+struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
+ void *priv,
+ v4l2_debugfs_if_read_t if_read)
+{
+ struct v4l2_debugfs_if *infoframes;
+
+ if (IS_ERR_OR_NULL(root) || !if_types || !if_read)
+ return NULL;
+
+ infoframes = kzalloc(sizeof(*infoframes), GFP_KERNEL);
+ if (!infoframes)
+ return NULL;
+
+ infoframes->if_dir = debugfs_create_dir("infoframes", root);
+ infoframes->priv = priv;
+ infoframes->if_read = if_read;
+ if (if_types & V4L2_DEBUGFS_IF_AVI)
+ debugfs_create_file("avi", 0400, infoframes->if_dir,
+ infoframes, &infoframe_avi_fops);
+ if (if_types & V4L2_DEBUGFS_IF_AUDIO)
+ debugfs_create_file("audio", 0400, infoframes->if_dir,
+ infoframes, &infoframe_audio_fops);
+ if (if_types & V4L2_DEBUGFS_IF_SPD)
+ debugfs_create_file("spd", 0400, infoframes->if_dir,
+ infoframes, &infoframe_spd_fops);
+ if (if_types & V4L2_DEBUGFS_IF_HDMI)
+ debugfs_create_file("hdmi", 0400, infoframes->if_dir,
+ infoframes, &infoframe_hdmi_fops);
+ if (if_types & V4L2_DEBUGFS_IF_DRM)
+ debugfs_create_file("hdr_drm", 0400, infoframes->if_dir,
+ infoframes, &infoframe_drm_fops);
+ return infoframes;
+}
+EXPORT_SYMBOL_GPL(v4l2_debugfs_if_alloc);
+
+void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes)
+{
+ if (infoframes) {
+ debugfs_remove_recursive(infoframes->if_dir);
+ kfree(infoframes);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_debugfs_if_free);
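A receiver driver hooks this up by supplying an if_read callback that copies the last captured raw InfoFrame to userspace; a sketch with hypothetical driver state and buffer sizes:

/* Hypothetical driver state; only the members used here are shown. */
struct demo_state {
	struct dentry *debugfs_root;
	struct v4l2_debugfs_if *infoframes;
	u8 avi_if[32];
	size_t avi_if_len;
};

static ssize_t demo_if_read(u32 type, void *priv, struct file *filp,
			    char __user *ubuf, size_t count, loff_t *ppos)
{
	struct demo_state *state = priv;

	if (type != V4L2_DEBUGFS_IF_AVI)
		return 0;
	return simple_read_from_buffer(ubuf, count, ppos,
				       state->avi_if, state->avi_if_len);
}

static void demo_debugfs_init(struct demo_state *state)
{
	state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_root,
						  V4L2_DEBUGFS_IF_AVI,
						  state, demo_if_read);
}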
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 86dcb5483c42..3898ff7edddb 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* v4l2-event.c
*
@@ -6,31 +7,18 @@
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
-static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
+static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
idx += sev->first;
return idx >= sev->elems ? idx - sev->elems : idx;
@@ -39,6 +27,7 @@ static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
struct v4l2_kevent *kev;
+ struct timespec64 ts;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
@@ -56,6 +45,9 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
kev->event.pending = fh->navailable;
*event = kev->event;
+ ts = ns_to_timespec64(kev->ts);
+ event->timestamp.tv_sec = ts.tv_sec;
+ event->timestamp.tv_nsec = ts.tv_nsec;
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;
@@ -107,8 +99,8 @@ static struct v4l2_subscribed_event *v4l2_event_subscribed(
return NULL;
}
-static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
- const struct timespec *ts)
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
+ const struct v4l2_event *ev, u64 ts)
{
struct v4l2_subscribed_event *sev;
struct v4l2_kevent *kev;
@@ -119,14 +111,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -156,7 +140,7 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (copy_payload)
kev->event.u = ev->u;
kev->event.id = ev->id;
- kev->event.timestamp = *ts;
+ kev->ts = ts;
kev->event.sequence = fh->sequence;
sev->in_use++;
list_add_tail(&kev->list, &fh->available);
@@ -170,14 +154,17 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
unsigned long flags;
- struct timespec timestamp;
+ u64 ts;
+
+ if (vdev == NULL)
+ return;
- ktime_get_ts(&timestamp);
+ ts = ktime_get_ns();
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list)
- __v4l2_event_queue_fh(fh, ev, &timestamp);
+ __v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
@@ -186,12 +173,10 @@ EXPORT_SYMBOL_GPL(v4l2_event_queue);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
unsigned long flags;
- struct timespec timestamp;
-
- ktime_get_ts(&timestamp);
+ u64 ts = ktime_get_ns();
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- __v4l2_event_queue_fh(fh, ev, &timestamp);
+ __v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
@@ -202,13 +187,47 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
+void v4l2_event_wake_all(struct video_device *vdev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+
+ if (!vdev)
+ return;
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ wake_up_all(&fh->wait);
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
+
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_fh *fh = sev->fh;
+ unsigned int i;
+
+ lockdep_assert_held(&fh->subscribe_lock);
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
+ list_del(&sev->list);
+}
+
int v4l2_event_subscribe(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_event_subscription *sub, unsigned int elems,
const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
- unsigned i;
+ unsigned int i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -216,9 +235,10 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
if (elems < 1)
elems = 1;
- sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
+ sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
if (!sev)
return -ENOMEM;
+ sev->elems = elems;
for (i = 0; i < elems; i++)
sev->events[i].sev = sev;
sev->type = sub->type;
@@ -227,6 +247,8 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->fh = fh;
sev->ops = ops;
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
if (!found_ev)
@@ -234,23 +256,21 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
- kfree(sev);
- return 0; /* Already listening */
- }
-
- if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ /* Already listening */
+ kvfree(sev);
+ } else if (sev->ops && sev->ops->add) {
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_unsubscribe(sev);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kvfree(sev);
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ mutex_unlock(&fh->subscribe_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -282,31 +302,28 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
- int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (sev != NULL) {
- /* Remove any pending events for this subscription */
- for (i = 0; i < sev->in_use; i++) {
- list_del(&sev->events[sev_pos(sev, i)].list);
- fh->navailable--;
- }
- list_del(&sev->list);
- }
+ if (sev != NULL)
+ __v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
- kfree(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
+ kvfree(sev);
return 0;
}
@@ -318,3 +335,39 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
+
+static void v4l2_event_src_replace(struct v4l2_event *old,
+ const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.src_change.changes;
+
+ old->u.src_change = new->u.src_change;
+ old->u.src_change.changes |= old_changes;
+}
+
+static void v4l2_event_src_merge(const struct v4l2_event *old,
+ struct v4l2_event *new)
+{
+ new->u.src_change.changes |= old->u.src_change.changes;
+}
+
+static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
+ .replace = v4l2_event_src_replace,
+ .merge = v4l2_event_src_merge,
+};
+
+int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
+
+int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ return v4l2_src_change_event_subscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
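With these helpers, a driver only has to route V4L2_EVENT_SOURCE_CHANGE subscriptions to v4l2_src_change_event_subscribe() and queue the event when it detects a new signal; a minimal sketch (the demo_* names are illustrative):

static int demo_subscribe_event(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_src_change_event_subscribe(fh, sub);
	return -EINVAL;
}

/* Called from the signal-detection interrupt or polling work */
static void demo_signal_changed(struct video_device *vdev)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
	};

	v4l2_event_queue(vdev, &ev);
}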
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index e57c002b4150..df3ba9d4674b 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* v4l2-fh.c
*
@@ -6,20 +7,6 @@
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/bitops.h>
@@ -29,6 +16,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
@@ -37,20 +25,29 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
fh->ctrl_handler = vdev->ctrl_handler;
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+ /*
+ * determine_valid_ioctls() does not know if struct v4l2_fh
+ * is used by this driver, but here we do. So enable the
+ * prio ioctls here.
+ */
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls);
fh->prio = V4L2_PRIORITY_UNSET;
init_waitqueue_head(&fh->wait);
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
-void v4l2_fh_add(struct v4l2_fh *fh)
+void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
- if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
- v4l2_prio_open(fh->vdev->prio, &fh->prio);
+ filp->private_data = fh;
+
+ v4l2_prio_open(fh->vdev->prio, &fh->prio);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&fh->list, &fh->vdev->fh_list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
@@ -62,24 +59,24 @@ int v4l2_fh_open(struct file *filp)
struct video_device *vdev = video_devdata(filp);
struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- filp->private_data = fh;
if (fh == NULL)
return -ENOMEM;
v4l2_fh_init(fh, vdev);
- v4l2_fh_add(fh);
+ v4l2_fh_add(fh, filp);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
-void v4l2_fh_del(struct v4l2_fh *fh)
+void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_del_init(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
- v4l2_prio_close(fh->vdev->prio, fh->prio);
+ v4l2_prio_close(fh->vdev->prio, fh->prio);
+
+ filp->private_data = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);
@@ -87,17 +84,19 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
+ v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
int v4l2_fh_release(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
if (fh) {
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
kfree(fh);
}
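Drivers that embed the file handle in a larger per-open structure cannot use v4l2_fh_open()/v4l2_fh_release() directly; with the new signatures above, their open and release paths end up looking roughly like this sketch (struct demo_fh is hypothetical):

struct demo_fh {
	struct v4l2_fh fh;
	/* ... per-open driver state ... */
};

static int demo_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	struct demo_fh *dfh = kzalloc(sizeof(*dfh), GFP_KERNEL);

	if (!dfh)
		return -ENOMEM;
	v4l2_fh_init(&dfh->fh, vdev);
	/* v4l2_fh_add() now also sets filp->private_data */
	v4l2_fh_add(&dfh->fh, filp);
	return 0;
}

static int demo_release(struct file *filp)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(filp);
	struct demo_fh *dfh = container_of(fh, struct demo_fh, fh);

	v4l2_fh_del(fh, filp);
	v4l2_fh_exit(fh);
	kfree(dfh);
	return 0;
}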
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
new file mode 100644
index 000000000000..355595a0fefa
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 flash LED sub-device registration helpers.
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ */
+
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define has_flash_op(v4l2_flash, op) \
+ (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op)
+
+#define call_flash_op(v4l2_flash, op, arg) \
+ (has_flash_op(v4l2_flash, op) ? \
+ v4l2_flash->ops->op(v4l2_flash, arg) : \
+ -EINVAL)
+
+enum ctrl_init_data_id {
+ LED_MODE,
+ TORCH_INTENSITY,
+ FLASH_INTENSITY,
+ INDICATOR_INTENSITY,
+ FLASH_TIMEOUT,
+ STROBE_SOURCE,
+ /*
+ * Only above values are applicable to
+ * the 'ctrls' array in the struct v4l2_flash.
+ */
+ FLASH_STROBE,
+ STROBE_STOP,
+ STROBE_STATUS,
+ FLASH_FAULT,
+ NUM_FLASH_CTRLS,
+};
+
+static enum led_brightness __intensity_to_led_brightness(
+ struct v4l2_ctrl *ctrl, s32 intensity)
+{
+ intensity -= ctrl->minimum;
+ intensity /= (u32) ctrl->step;
+
+ /*
+	 * Indicator LEDs, unlike torch LEDs, are turned on/off based solely
+	 * on the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control.
+	 * Therefore it must be possible to set it to level 0, which in
+	 * the LED subsystem reflects the LED_OFF state.
+ */
+ if (ctrl->minimum)
+ ++intensity;
+
+ return intensity;
+}
+
+static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl,
+ enum led_brightness brightness)
+{
+ /*
+	 * Indicator LEDs, unlike torch LEDs, are turned on/off based solely
+	 * on the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control.
+ * Do not decrement brightness read from the LED subsystem for
+ * indicator LED as it may equal 0. For torch LEDs this function
+ * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the
+ * brightness read is guaranteed to be greater than 0. In the mode
+ * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used.
+ */
+ if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY)
+ --brightness;
+
+ return (brightness * ctrl->step) + ctrl->minimum;
+}
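As a worked example of the default conversion: for a torch control with minimum 15625 and step 15625 (microamperes), an intensity of 250000 maps to (250000 - 15625) / 15625 = 15 and is then incremented to LED brightness 16 because the minimum is non-zero; the reverse helper gives back (16 - 1) * 15625 + 15625 = 250000.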
+
+static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ struct led_classdev *led_cdev;
+ enum led_brightness brightness;
+
+ if (has_flash_op(v4l2_flash, intensity_to_led_brightness))
+ brightness = call_flash_op(v4l2_flash,
+ intensity_to_led_brightness,
+ ctrl->val);
+ else
+ brightness = __intensity_to_led_brightness(ctrl, ctrl->val);
+ /*
+	 * If a LED Flash class driver provides ops for custom
+	 * brightness <-> intensity conversion, it must also have defined the
+	 * related v4l2 control with step == 1. In that case a backward
+	 * conversion from LED brightness to v4l2 intensity is required to
+	 * find the aligned intensity value.
+ */
+ if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+ ctrl->val = call_flash_op(v4l2_flash,
+ led_brightness_to_intensity,
+ brightness);
+
+ if (ctrl == ctrls[TORCH_INTENSITY]) {
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ return 0;
+
+ if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
+ return -EINVAL;
+
+ led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+ } else {
+ if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
+ return -EINVAL;
+
+ led_cdev = v4l2_flash->iled_cdev;
+ }
+
+ return led_set_brightness_sync(led_cdev, brightness);
+}
+
+static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ struct led_classdev *led_cdev;
+ int ret;
+
+ if (ctrl == ctrls[TORCH_INTENSITY]) {
+ /*
+ * Update torch brightness only if in TORCH_MODE. In other modes
+ * torch led is turned off, which would spuriously inform the
+ * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value
+ * has changed to 0.
+ */
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ return 0;
+
+ if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
+ return -EINVAL;
+
+ led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+ } else {
+ if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
+ return -EINVAL;
+
+ led_cdev = v4l2_flash->iled_cdev;
+ }
+
+ ret = led_update_brightness(led_cdev);
+ if (ret < 0)
+ return ret;
+
+ if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+ ctrl->val = call_flash_op(v4l2_flash,
+ led_brightness_to_intensity,
+ led_cdev->brightness);
+ else
+ ctrl->val = __led_brightness_to_intensity(ctrl,
+ led_cdev->brightness);
+
+ return 0;
+}
+
+static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ bool is_strobing;
+ int ret;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ return v4l2_flash_update_led_brightness(v4l2_flash, c);
+ }
+
+ if (!fled_cdev)
+ return -EINVAL;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = led_update_flash_brightness(fled_cdev);
+ if (ret < 0)
+ return ret;
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microamperes for flash intensity units.
+ */
+ c->val = fled_cdev->brightness.val;
+ return 0;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ ret = led_get_flash_strobe(fled_cdev, &is_strobing);
+ if (ret < 0)
+ return ret;
+ c->val = is_strobing;
+ return 0;
+ case V4L2_CID_FLASH_FAULT:
+ /* LED faults map directly to V4L2 flash faults */
+ return led_get_flash_fault(fled_cdev, &c->val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls)
+{
+ return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) ||
+ (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val !=
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE)));
+}
+
+static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev;
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ bool external_strobe;
+ int ret = 0;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ return v4l2_flash_set_led_brightness(v4l2_flash, c);
+ }
+
+ if (!fled_cdev)
+ return -EINVAL;
+
+ led_cdev = &fled_cdev->led_cdev;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+ switch (c->val) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ led_set_brightness_sync(led_cdev, LED_OFF);
+ return led_set_flash_strobe(fled_cdev, false);
+ case V4L2_FLASH_LED_MODE_FLASH:
+ /* Turn the torch LED off */
+ led_set_brightness_sync(led_cdev, LED_OFF);
+ if (ctrls[STROBE_SOURCE]) {
+ external_strobe = (ctrls[STROBE_SOURCE]->val ==
+ V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+
+ ret = call_flash_op(v4l2_flash,
+ external_strobe_set,
+ external_strobe);
+ }
+ return ret;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ if (ctrls[STROBE_SOURCE]) {
+ ret = call_flash_op(v4l2_flash,
+ external_strobe_set,
+ false);
+ if (ret < 0)
+ return ret;
+ }
+ /* Stop flash strobing */
+ ret = led_set_flash_strobe(fled_cdev, false);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[TORCH_INTENSITY]);
+ }
+ break;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+ /*
+ * For some hardware arrangements setting the strobe source may
+ * affect torch mode. Therefore, if not in flash mode, only
+ * cache this setting. It will be applied upon switching to
+ * flash mode.
+ */
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH)
+ return 0;
+
+ return call_flash_op(v4l2_flash, external_strobe_set,
+ external_strobe);
+ case V4L2_CID_FLASH_STROBE:
+ if (__software_strobe_mode_inactive(ctrls))
+ return -EBUSY;
+ return led_set_flash_strobe(fled_cdev, true);
+ case V4L2_CID_FLASH_STROBE_STOP:
+ if (__software_strobe_mode_inactive(ctrls))
+ return -EBUSY;
+ return led_set_flash_strobe(fled_cdev, false);
+ case V4L2_CID_FLASH_TIMEOUT:
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microseconds for flash timeout units.
+ */
+ return led_set_flash_timeout(fled_cdev, c->val);
+ case V4L2_CID_FLASH_INTENSITY:
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microamperes for flash intensity units.
+ */
+ return led_set_flash_brightness(fled_cdev, c->val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = {
+ .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl,
+ .s_ctrl = v4l2_flash_s_ctrl,
+};
+
+static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s,
+ struct v4l2_ctrl_config *c)
+{
+ c->min = s->min;
+ c->max = s->max;
+ c->step = s->step;
+ c->def = s->val;
+}
+
+static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash,
+ struct v4l2_flash_config *flash_cfg,
+ struct v4l2_flash_ctrl_data *ctrl_init_data)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct v4l2_ctrl_config *ctrl_cfg;
+ u32 mask;
+
+ /* Init INDICATOR_INTENSITY ctrl data */
+ if (v4l2_flash->iled_cdev) {
+ ctrl_init_data[INDICATOR_INTENSITY].cid =
+ V4L2_CID_FLASH_INDICATOR_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity,
+ ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY;
+ ctrl_cfg->min = 0;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ }
+
+ if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH)))
+ return;
+
+ /* Init FLASH_FAULT ctrl data */
+ if (flash_cfg->flash_faults) {
+ ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT;
+ ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_FAULT;
+ ctrl_cfg->max = flash_cfg->flash_faults;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ /* Init FLASH_LED_MODE ctrl data */
+ mask = 1 << V4L2_FLASH_LED_MODE_NONE |
+ 1 << V4L2_FLASH_LED_MODE_TORCH;
+ if (led_cdev->flags & LED_DEV_CAP_FLASH)
+ mask |= 1 << V4L2_FLASH_LED_MODE_FLASH;
+
+ ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE;
+ ctrl_cfg = &ctrl_init_data[LED_MODE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE;
+ ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH;
+ ctrl_cfg->menu_skip_mask = ~mask;
+ ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE;
+ ctrl_cfg->flags = 0;
+
+ /* Init TORCH_INTENSITY ctrl data */
+ ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+
+ /* Init FLASH_STROBE ctrl data */
+ ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE;
+ ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE;
+
+ /* Init STROBE_STOP ctrl data */
+ ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP;
+ ctrl_cfg = &ctrl_init_data[STROBE_STOP].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP;
+
+ /* Init FLASH_STROBE_SOURCE ctrl data */
+ if (flash_cfg->has_external_strobe) {
+ mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) |
+ (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+ ctrl_init_data[STROBE_SOURCE].cid =
+ V4L2_CID_FLASH_STROBE_SOURCE;
+ ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE;
+ ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL;
+ ctrl_cfg->menu_skip_mask = ~mask;
+ ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
+ }
+
+ /* Init STROBE_STATUS ctrl data */
+ if (has_flash_op(fled_cdev, strobe_get)) {
+ ctrl_init_data[STROBE_STATUS].cid =
+ V4L2_CID_FLASH_STROBE_STATUS;
+ ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ /* Init FLASH_TIMEOUT ctrl data */
+ if (has_flash_op(fled_cdev, timeout_set)) {
+ ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT;
+ ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config;
+ __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT;
+ }
+
+ /* Init FLASH_INTENSITY ctrl data */
+ if (has_flash_op(fled_cdev, flash_brightness_set)) {
+ ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ }
+}
+
+static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash,
+ struct v4l2_flash_config *flash_cfg)
+{
+ struct v4l2_flash_ctrl_data *ctrl_init_data;
+ struct v4l2_ctrl *ctrl;
+ struct v4l2_ctrl_config *ctrl_cfg;
+ int i, ret, num_ctrls = 0;
+
+ v4l2_flash->ctrls = devm_kcalloc(v4l2_flash->sd.dev,
+ STROBE_SOURCE + 1,
+ sizeof(*v4l2_flash->ctrls),
+ GFP_KERNEL);
+ if (!v4l2_flash->ctrls)
+ return -ENOMEM;
+
+ /* allocate memory dynamically so as not to exceed stack frame size */
+ ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data),
+ GFP_KERNEL);
+ if (!ctrl_init_data)
+ return -ENOMEM;
+
+ __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data);
+
+ for (i = 0; i < NUM_FLASH_CTRLS; ++i)
+ if (ctrl_init_data[i].cid)
+ ++num_ctrls;
+
+ v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls);
+
+ for (i = 0; i < NUM_FLASH_CTRLS; ++i) {
+ ctrl_cfg = &ctrl_init_data[i].config;
+ if (!ctrl_init_data[i].cid)
+ continue;
+
+ if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE ||
+ ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE)
+ ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl,
+ &v4l2_flash_ctrl_ops,
+ ctrl_cfg->id,
+ ctrl_cfg->max,
+ ctrl_cfg->menu_skip_mask,
+ ctrl_cfg->def);
+ else
+ ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl,
+ &v4l2_flash_ctrl_ops,
+ ctrl_cfg->id,
+ ctrl_cfg->min,
+ ctrl_cfg->max,
+ ctrl_cfg->step,
+ ctrl_cfg->def);
+
+ if (ctrl)
+ ctrl->flags |= ctrl_cfg->flags;
+
+ if (i <= STROBE_SOURCE)
+ v4l2_flash->ctrls[i] = ctrl;
+ }
+
+ kfree(ctrl_init_data);
+
+ if (v4l2_flash->hdl.error) {
+ ret = v4l2_flash->hdl.error;
+ goto error_free_handler;
+ }
+
+ v4l2_ctrl_handler_setup(&v4l2_flash->hdl);
+
+ v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl;
+
+ return 0;
+
+error_free_handler:
+ v4l2_ctrl_handler_free(&v4l2_flash->hdl);
+ return ret;
+}
+
+static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ int ret = 0;
+
+ if (ctrls[TORCH_INTENSITY]) {
+ ret = v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[TORCH_INTENSITY]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[INDICATOR_INTENSITY]) {
+ ret = v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[INDICATOR_INTENSITY]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[FLASH_TIMEOUT]) {
+ if (WARN_ON_ONCE(!fled_cdev))
+ return -EINVAL;
+
+ ret = led_set_flash_timeout(fled_cdev,
+ ctrls[FLASH_TIMEOUT]->val);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[FLASH_INTENSITY]) {
+ if (WARN_ON_ONCE(!fled_cdev))
+ return -EINVAL;
+
+ ret = led_set_flash_brightness(fled_cdev,
+ ctrls[FLASH_INTENSITY]->val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * For some hardware arrangements setting the strobe source may affect
+ * torch mode. Synchronize the strobe source setting only if not in
+ * torch mode. In the torch mode case it will be synchronized upon
+ * switching to flash mode.
+ */
+ if (ctrls[STROBE_SOURCE] &&
+ ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ ret = call_flash_op(v4l2_flash, external_strobe_set,
+ ctrls[STROBE_SOURCE]->val);
+
+ return ret;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+
+static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
+ int ret = 0;
+
+ if (!v4l2_fh_is_singular(&fh->vfh))
+ return 0;
+
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+
+ led_sysfs_disable(led_cdev);
+ led_trigger_remove(led_cdev);
+
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+
+ led_sysfs_disable(led_cdev_ind);
+ led_trigger_remove(led_cdev_ind);
+
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ ret = __sync_device_with_v4l2_controls(v4l2_flash);
+ if (ret < 0)
+ goto out_sync_device;
+
+ return 0;
+out_sync_device:
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_enable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+ led_sysfs_enable(led_cdev_ind);
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ return ret;
+}
+
+static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
+ int ret = 0;
+
+ if (!v4l2_fh_is_singular(&fh->vfh))
+ return 0;
+
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+
+ if (v4l2_flash->ctrls[STROBE_SOURCE])
+ ret = v4l2_ctrl_s_ctrl(
+ v4l2_flash->ctrls[STROBE_SOURCE],
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+ led_sysfs_enable(led_cdev);
+
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+ led_sysfs_enable(led_cdev_ind);
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
+ .open = v4l2_flash_open,
+ .close = v4l2_flash_close,
+};
+
+static const struct v4l2_subdev_ops v4l2_flash_subdev_ops;
+
+static struct v4l2_flash *__v4l2_flash_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev,
+ const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config)
+{
+ struct v4l2_flash *v4l2_flash;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (!config)
+ return ERR_PTR(-EINVAL);
+
+ v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL);
+ if (!v4l2_flash)
+ return ERR_PTR(-ENOMEM);
+
+ sd = &v4l2_flash->sd;
+ v4l2_flash->fled_cdev = fled_cdev;
+ v4l2_flash->iled_cdev = iled_cdev;
+ v4l2_flash->ops = ops;
+ sd->dev = dev;
+ sd->fwnode = fwn ? fwn : dev_fwnode(dev);
+ v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
+ sd->internal_ops = &v4l2_flash_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strscpy(sd->name, config->dev_name, sizeof(sd->name));
+
+ ret = media_entity_pads_init(&sd->entity, 0, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ sd->entity.function = MEDIA_ENT_F_FLASH;
+
+ ret = v4l2_flash_init_controls(v4l2_flash, config);
+ if (ret < 0)
+ goto err_init_controls;
+
+ fwnode_handle_get(sd->fwnode);
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto err_async_register_sd;
+
+ return v4l2_flash;
+
+err_async_register_sd:
+ fwnode_handle_put(sd->fwnode);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_init_controls:
+ media_entity_cleanup(&sd->entity);
+
+ return ERR_PTR(ret);
+}
+
+struct v4l2_flash *v4l2_flash_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev_flash *fled_cdev,
+ const struct v4l2_flash_ops *ops,
+ struct v4l2_flash_config *config)
+{
+ return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_init);
+
+struct v4l2_flash *v4l2_flash_indicator_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev *iled_cdev,
+ struct v4l2_flash_config *config)
+{
+ return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init);
+
+void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
+{
+ struct v4l2_subdev *sd;
+
+ if (IS_ERR_OR_NULL(v4l2_flash))
+ return;
+
+ sd = &v4l2_flash->sd;
+
+ v4l2_async_unregister_subdev(sd);
+
+ fwnode_handle_put(sd->fwnode);
+
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_release);
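+
+/*
+ * Minimal usage sketch, not taken from any in-tree driver: a LED Flash
+ * class driver fills a struct v4l2_flash_config (dev_name, intensity
+ * range, flash_faults, has_external_strobe) from its hardware limits and
+ * registers the sub-device in probe(); "fled" and "my_flash_ops" below
+ * are placeholders.
+ *
+ *   struct v4l2_flash_config cfg = { 0 };
+ *   struct v4l2_flash *vf;
+ *
+ *   strscpy(cfg.dev_name, dev_name(dev), sizeof(cfg.dev_name));
+ *   // fill cfg.intensity and cfg.flash_faults from the chip limits here
+ *   vf = v4l2_flash_init(dev, NULL, fled, &my_flash_ops, &cfg);
+ *   if (IS_ERR(vf))
+ *           return PTR_ERR(vf);
+ *   ...
+ *   v4l2_flash_release(vf);
+ */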
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("V4L2 Flash sub-device helpers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
new file mode 100644
index 000000000000..cb153ce42c45
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 fwnode binding parsing library
+ *
+ * The origins of the V4L2 fwnode library are in V4L2 OF library that
+ * formerly was located in v4l2-of.c.
+ *
+ * Copyright (c) 2016 Intel Corporation.
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "v4l2-subdev-priv.h"
+
+static const struct v4l2_fwnode_bus_conv {
+ enum v4l2_fwnode_bus_type fwnode_bus_type;
+ enum v4l2_mbus_type mbus_type;
+ const char *name;
+} buses[] = {
+ {
+ V4L2_FWNODE_BUS_TYPE_GUESS,
+ V4L2_MBUS_UNKNOWN,
+ "not specified",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+ V4L2_MBUS_CSI2_CPHY,
+ "MIPI CSI-2 C-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI1,
+ V4L2_MBUS_CSI1,
+ "MIPI CSI-1",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_MBUS_CCP2,
+ "compact camera port 2",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_MBUS_CSI2_DPHY,
+ "MIPI CSI-2 D-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_MBUS_PARALLEL,
+ "parallel",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_BT656,
+ V4L2_MBUS_BT656,
+ "Bt.656",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_DPI,
+ V4L2_MBUS_DPI,
+ "DPI",
+ }
+};
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].fwnode_bus_type == type)
+ return &buses[i];
+
+ return NULL;
+}
+
+static enum v4l2_mbus_type
+v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->mbus_type : V4L2_MBUS_INVALID;
+}
+
+static const char *
+v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->name : "not found";
+}
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].mbus_type == type)
+ return &buses[i];
+
+ return NULL;
+}
+
+static const char *
+v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_mbus(type);
+
+ return conv ? conv->name : "not found";
+}
+
+static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
+ bool have_clk_lane = false, have_data_lanes = false,
+ have_lane_polarities = false, have_line_orders = false;
+ unsigned int flags = 0, lanes_used = 0;
+ u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ u32 clock_lane = 0;
+ unsigned int num_data_lanes = 0;
+ bool use_default_lane_mapping = false;
+ unsigned int i;
+ u32 v;
+ int rval;
+
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY) {
+ use_default_lane_mapping = true;
+
+ num_data_lanes = min_t(u32, bus->num_data_lanes,
+ V4L2_MBUS_CSI2_MAX_DATA_LANES);
+
+ clock_lane = bus->clock_lane;
+ if (clock_lane)
+ use_default_lane_mapping = false;
+
+ for (i = 0; i < num_data_lanes; i++) {
+ array[i] = bus->data_lanes[i];
+ if (array[i])
+ use_default_lane_mapping = false;
+ }
+
+ if (use_default_lane_mapping)
+ pr_debug("no lane mapping given, using defaults\n");
+ }
+
+ rval = fwnode_property_count_u32(fwnode, "data-lanes");
+ if (rval > 0) {
+ num_data_lanes =
+ min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval);
+
+ fwnode_property_read_u32_array(fwnode, "data-lanes", array,
+ num_data_lanes);
+
+ have_data_lanes = true;
+ if (use_default_lane_mapping) {
+ pr_debug("data-lanes property exists; disabling default mapping\n");
+ use_default_lane_mapping = false;
+ }
+ }
+
+ for (i = 0; i < num_data_lanes; i++) {
+ if (lanes_used & BIT(array[i])) {
+ if (have_data_lanes || !use_default_lane_mapping)
+ pr_warn("duplicated lane %u in data-lanes, using defaults\n",
+ array[i]);
+ use_default_lane_mapping = true;
+ }
+ lanes_used |= BIT(array[i]);
+
+ if (have_data_lanes)
+ pr_debug("lane %u position %u\n", i, array[i]);
+ }
+
+ rval = fwnode_property_count_u32(fwnode, "lane-polarities");
+ if (rval > 0) {
+ if (rval != 1 + num_data_lanes /* clock+data */) {
+ pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
+ 1 + num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ have_lane_polarities = true;
+ }
+
+ rval = fwnode_property_count_u32(fwnode, "line-orders");
+ if (rval > 0) {
+ if (rval != num_data_lanes) {
+ pr_warn("invalid number of line-orders entries (need %u, got %u)\n",
+ num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ have_line_orders = true;
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
+ clock_lane = v;
+ pr_debug("clock lane position %u\n", v);
+ have_clk_lane = true;
+ }
+
+ if (have_clk_lane && lanes_used & BIT(clock_lane) &&
+ !use_default_lane_mapping) {
+ pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
+ v);
+ use_default_lane_mapping = true;
+ }
+
+ if (fwnode_property_present(fwnode, "clock-noncontinuous")) {
+ flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ pr_debug("non-continuous clock\n");
+ }
+
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY ||
+ lanes_used || have_clk_lane || flags) {
+ /* Only D-PHY has a clock lane. */
+ unsigned int dfl_data_lane_index =
+ bus_type == V4L2_MBUS_CSI2_DPHY;
+
+ bus->flags = flags;
+ if (bus_type == V4L2_MBUS_UNKNOWN)
+ vep->bus_type = V4L2_MBUS_CSI2_DPHY;
+ bus->num_data_lanes = num_data_lanes;
+
+ if (use_default_lane_mapping) {
+ bus->clock_lane = 0;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = dfl_data_lane_index + i;
+ } else {
+ bus->clock_lane = clock_lane;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = array[i];
+ }
+
+ if (have_lane_polarities) {
+ fwnode_property_read_u32_array(fwnode,
+ "lane-polarities", array,
+ 1 + num_data_lanes);
+
+ for (i = 0; i < 1 + num_data_lanes; i++) {
+ bus->lane_polarities[i] = array[i];
+ pr_debug("lane %u polarity %sinverted\n",
+ i, array[i] ? "" : "not ");
+ }
+ } else {
+ pr_debug("no lane polarities defined, assuming not inverted\n");
+ }
+
+ if (have_line_orders) {
+ fwnode_property_read_u32_array(fwnode,
+ "line-orders", array,
+ num_data_lanes);
+
+ for (i = 0; i < num_data_lanes; i++) {
+ static const char * const orders[] = {
+ "ABC", "ACB", "BAC", "BCA", "CAB", "CBA"
+ };
+
+ if (array[i] >= ARRAY_SIZE(orders)) {
+ pr_warn("lane %u invalid line-order, assuming ABC (got %u)\n",
+ i, array[i]);
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+ continue;
+ }
+
+ bus->line_orders[i] = array[i];
+ pr_debug("lane %u line order %s\n", i,
+ orders[array[i]]);
+ }
+ } else {
+ for (i = 0; i < num_data_lanes; i++)
+ bus->line_orders[i] =
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC;
+
+ pr_debug("no line orders defined, assuming ABC\n");
+ }
+ }
+
+ return 0;
+}
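+
+/*
+ * Example endpoint (DT syntax, illustrative only) accepted by the CSI-2
+ * parser above for a two-lane D-PHY link; &csi_in is a placeholder label:
+ *
+ *   endpoint {
+ *           bus-type = <4>;         // MIPI CSI-2 D-PHY
+ *           clock-lanes = <0>;
+ *           data-lanes = <1 2>;
+ *           clock-noncontinuous;
+ *           remote-endpoint = <&csi_in>;
+ *   };
+ */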
+
+#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_FIELD_EVEN_HIGH | \
+ V4L2_MBUS_FIELD_EVEN_LOW)
+
+static void
+v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_parallel *bus = &vep->bus.parallel;
+ unsigned int flags = 0;
+ u32 v;
+
+ if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656)
+ flags = bus->flags;
+
+ if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ pr_debug("hsync-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ pr_debug("vsync-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) {
+ flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH |
+ V4L2_MBUS_FIELD_EVEN_LOW);
+ flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
+ V4L2_MBUS_FIELD_EVEN_LOW;
+ pr_debug("field-even-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) {
+ flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_PCLK_SAMPLE_DUALEDGE);
+ switch (v) {
+ case 0:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ pr_debug("pclk-sample low\n");
+ break;
+ case 1:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_RISING;
+ pr_debug("pclk-sample high\n");
+ break;
+ case 2:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE;
+ pr_debug("pclk-sample dual edge\n");
+ break;
+ default:
+ pr_warn("invalid argument for pclk-sample\n");
+ break;
+ }
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
+ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+ pr_debug("data-active %s\n", v ? "high" : "low");
+ }
+
+ if (fwnode_property_present(fwnode, "slave-mode")) {
+ pr_debug("slave mode\n");
+ flags &= ~V4L2_MBUS_MASTER;
+ flags |= V4L2_MBUS_SLAVE;
+ } else {
+ flags &= ~V4L2_MBUS_SLAVE;
+ flags |= V4L2_MBUS_MASTER;
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) {
+ bus->bus_width = v;
+ pr_debug("bus-width %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) {
+ bus->data_shift = v;
+ pr_debug("data-shift %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) {
+ flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH |
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+ pr_debug("sync-on-green-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) {
+ flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH |
+ V4L2_MBUS_DATA_ENABLE_LOW);
+ flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
+ V4L2_MBUS_DATA_ENABLE_LOW;
+ pr_debug("data-enable-active %s\n", v ? "high" : "low");
+ }
+
+ switch (bus_type) {
+ default:
+ bus->flags = flags;
+ if (flags & PARALLEL_MBUS_FLAGS)
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ vep->bus_type = V4L2_MBUS_BT656;
+ break;
+ case V4L2_MBUS_PARALLEL:
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ bus->flags = flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vep->bus_type = V4L2_MBUS_BT656;
+ bus->flags = flags & ~PARALLEL_MBUS_FLAGS;
+ break;
+ }
+}
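+
+/*
+ * Example endpoint (DT syntax, illustrative only) for a parallel bus as
+ * handled above; &parallel_in is a placeholder label:
+ *
+ *   endpoint {
+ *           hsync-active = <1>;
+ *           vsync-active = <0>;
+ *           pclk-sample = <1>;
+ *           bus-width = <8>;
+ *           remote-endpoint = <&parallel_in>;
+ *   };
+ */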
+
+static void
+v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1;
+ u32 v;
+
+ if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) {
+ bus->clock_inv = v;
+ pr_debug("clock-inv %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "strobe", &v)) {
+ bus->strobe = v;
+ pr_debug("strobe %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) {
+ bus->data_lane = v;
+ pr_debug("data-lanes %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
+ bus->clock_lane = v;
+ pr_debug("clock-lanes %u\n", v);
+ }
+
+ if (bus_type == V4L2_MBUS_CCP2)
+ vep->bus_type = V4L2_MBUS_CCP2;
+ else
+ vep->bus_type = V4L2_MBUS_CSI1;
+}
+
+static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS;
+ enum v4l2_mbus_type mbus_type;
+ int rval;
+
+ pr_debug("===== begin parsing endpoint %pfw\n", fwnode);
+
+ fwnode_property_read_u32(fwnode, "bus-type", &bus_type);
+ pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n",
+ v4l2_fwnode_bus_type_to_string(bus_type), bus_type,
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+ mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type);
+ if (mbus_type == V4L2_MBUS_INVALID) {
+ pr_debug("unsupported bus type %u\n", bus_type);
+ return -EINVAL;
+ }
+
+ if (vep->bus_type != V4L2_MBUS_UNKNOWN) {
+ if (mbus_type != V4L2_MBUS_UNKNOWN &&
+ vep->bus_type != mbus_type) {
+ pr_debug("expecting bus type %s\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type));
+ return -ENXIO;
+ }
+ } else {
+ vep->bus_type = mbus_type;
+ }
+
+ switch (vep->bus_type) {
+ case V4L2_MBUS_UNKNOWN:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
+ if (rval)
+ return rval;
+
+ if (vep->bus_type == V4L2_MBUS_UNKNOWN)
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
+
+ pr_debug("assuming media bus type %s (%u)\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CCP2:
+ case V4L2_MBUS_CSI1:
+ v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CSI2_DPHY:
+ case V4L2_MBUS_CSI2_CPHY:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ vep->bus_type);
+ if (rval)
+ return rval;
+
+ break;
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ vep->bus_type);
+
+ break;
+ default:
+ pr_warn("unsupported bus type %u\n", mbus_type);
+ return -EINVAL;
+ }
+
+ fwnode_graph_parse_endpoint(fwnode, &vep->base);
+
+ return 0;
+}
+
+int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int ret;
+
+ ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
+
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
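+
+/*
+ * Typical usage sketch (simplified; error paths and names such as "dev"
+ * are the caller's own): a driver parses its first endpoint, requesting a
+ * specific bus type where the binding mandates one. When link-frequencies
+ * are needed, v4l2_fwnode_endpoint_alloc_parse() and
+ * v4l2_fwnode_endpoint_free() are used instead.
+ *
+ *   struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ *   struct fwnode_handle *ep;
+ *   int ret;
+ *
+ *   ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ *   if (!ep)
+ *           return -ENODEV;
+ *   ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ *   fwnode_handle_put(ep);
+ *   if (ret)
+ *           return ret;
+ */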
+
+void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
+{
+ if (IS_ERR_OR_NULL(vep))
+ return;
+
+ kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
+
+int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int rval;
+
+ rval = __v4l2_fwnode_endpoint_parse(fwnode, vep);
+ if (rval < 0)
+ return rval;
+
+ rval = fwnode_property_count_u64(fwnode, "link-frequencies");
+ if (rval > 0) {
+ unsigned int i;
+
+ vep->link_frequencies =
+ kmalloc_array(rval, sizeof(*vep->link_frequencies),
+ GFP_KERNEL);
+ if (!vep->link_frequencies)
+ return -ENOMEM;
+
+ vep->nr_of_link_frequencies = rval;
+
+ rval = fwnode_property_read_u64_array(fwnode,
+ "link-frequencies",
+ vep->link_frequencies,
+ vep->nr_of_link_frequencies);
+ if (rval < 0) {
+ v4l2_fwnode_endpoint_free(vep);
+ return rval;
+ }
+
+ for (i = 0; i < vep->nr_of_link_frequencies; i++)
+ pr_debug("link-frequencies %u value %llu\n", i,
+ vep->link_frequencies[i]);
+ }
+
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
+
+int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_link *link)
+{
+ struct fwnode_endpoint fwep;
+
+ memset(link, 0, sizeof(*link));
+
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->local_id = fwep.id;
+ link->local_port = fwep.port;
+ link->local_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->local_node)
+ return -ENOLINK;
+
+ fwnode = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!fwnode)
+ goto err_put_local_node;
+
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->remote_id = fwep.id;
+ link->remote_port = fwep.port;
+ link->remote_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->remote_node)
+ goto err_put_remote_endpoint;
+
+ return 0;
+
+err_put_remote_endpoint:
+ fwnode_handle_put(fwnode);
+
+err_put_local_node:
+ fwnode_handle_put(link->local_node);
+
+ return -ENOLINK;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
+
+void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
+{
+ fwnode_handle_put(link->local_node);
+ fwnode_handle_put(link->remote_node);
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
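+
+/*
+ * Usage sketch (illustrative only; "endpoint_fwnode" is a placeholder): a
+ * bridge driver resolving where one of its endpoints leads typically does:
+ *
+ *   struct v4l2_fwnode_link link;
+ *   int ret;
+ *
+ *   ret = v4l2_fwnode_parse_link(endpoint_fwnode, &link);
+ *   if (ret)
+ *           return ret;
+ *   // use link.local_port, link.remote_port and link.remote_node here
+ *   v4l2_fwnode_put_link(&link);
+ */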
+
+static const struct v4l2_fwnode_connector_conv {
+ enum v4l2_connector_type type;
+ const char *compatible;
+} connectors[] = {
+ {
+ .type = V4L2_CONN_COMPOSITE,
+ .compatible = "composite-video-connector",
+ }, {
+ .type = V4L2_CONN_SVIDEO,
+ .compatible = "svideo-connector",
+ },
+};
+
+static enum v4l2_connector_type
+v4l2_fwnode_string_to_connector_type(const char *con_str)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(connectors); i++)
+ if (!strcmp(con_str, connectors[i].compatible))
+ return connectors[i].type;
+
+ return V4L2_CONN_UNKNOWN;
+}
+
+static void
+v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *vc)
+{
+ u32 stds;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds);
+
+ /* The property is optional. */
+ vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds;
+}
+
+void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector)
+{
+ struct v4l2_connector_link *link, *tmp;
+
+ if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN)
+ return;
+
+ list_for_each_entry_safe(link, tmp, &connector->links, head) {
+ v4l2_fwnode_put_link(&link->fwnode_link);
+ list_del(&link->head);
+ kfree(link);
+ }
+
+ kfree(connector->label);
+ connector->label = NULL;
+ connector->type = V4L2_CONN_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free);
+
+static enum v4l2_connector_type
+v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode)
+{
+ const char *type_name;
+ int err;
+
+ if (!fwnode)
+ return V4L2_CONN_UNKNOWN;
+
+ /* The connector-type is stored within the compatible string. */
+ err = fwnode_property_read_string(fwnode, "compatible", &type_name);
+ if (err)
+ return V4L2_CONN_UNKNOWN;
+
+ return v4l2_fwnode_string_to_connector_type(type_name);
+}
+
+int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_node;
+ enum v4l2_connector_type connector_type;
+ const char *label;
+ int err;
+
+ if (!fwnode)
+ return -EINVAL;
+
+ memset(connector, 0, sizeof(*connector));
+
+ INIT_LIST_HEAD(&connector->links);
+
+ connector_node = fwnode_graph_get_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ fwnode_handle_put(connector_node);
+ connector_node = fwnode_graph_get_remote_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ }
+
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ pr_err("Unknown connector type\n");
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ connector->type = connector_type;
+ connector->name = fwnode_get_name(connector_node);
+ err = fwnode_property_read_string(connector_node, "label", &label);
+ connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL);
+
+ /* Parse the connector specific properties. */
+ switch (connector->type) {
+ case V4L2_CONN_COMPOSITE:
+ case V4L2_CONN_SVIDEO:
+ v4l2_fwnode_connector_parse_analog(connector_node, connector);
+ break;
+ /* Avoid compiler warnings */
+ case V4L2_CONN_UNKNOWN:
+ break;
+ }
+
+out:
+ fwnode_handle_put(connector_node);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse);
+
+int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_ep;
+ struct v4l2_connector_link *link;
+ int err;
+
+ if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN)
+ return -EINVAL;
+
+ connector_ep = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!connector_ep)
+ return -ENOTCONN;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link);
+ if (err)
+ goto err;
+
+ fwnode_handle_put(connector_ep);
+
+ list_add(&link->head, &connector->links);
+ connector->nr_of_links++;
+
+ return 0;
+
+err:
+ kfree(link);
+ fwnode_handle_put(connector_ep);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link);
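+
+/*
+ * Usage sketch (illustrative only; "ep_fwnode" is the driver's endpoint
+ * leading to the connector): parse the connector once, record the link,
+ * and release everything with v4l2_fwnode_connector_free():
+ *
+ *   struct v4l2_fwnode_connector conn;
+ *   int ret;
+ *
+ *   ret = v4l2_fwnode_connector_parse(ep_fwnode, &conn);
+ *   if (!ret)
+ *           ret = v4l2_fwnode_connector_add_link(ep_fwnode, &conn);
+ *   ...
+ *   v4l2_fwnode_connector_free(&conn);
+ */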
+
+int v4l2_fwnode_device_parse(struct device *dev,
+ struct v4l2_fwnode_device_properties *props)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ u32 val;
+ int ret;
+
+ memset(props, 0, sizeof(*props));
+
+ props->orientation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "orientation", &val);
+ if (!ret) {
+ switch (val) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ break;
+ default:
+ dev_warn(dev, "Unsupported device orientation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->orientation = val;
+ dev_dbg(dev, "device orientation: %u\n", val);
+ }
+
+ props->rotation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "rotation", &val);
+ if (!ret) {
+ if (val >= 360) {
+ dev_warn(dev, "Unsupported device rotation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->rotation = val;
+ dev_dbg(dev, "device rotation: %u\n", val);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
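+
+/*
+ * Usage sketch (illustrative only; "sensor" and "ctrl_ops" are the
+ * driver's own): sensors usually parse the rotation/orientation
+ * properties in probe() and expose them as read-only controls with
+ * v4l2_ctrl_new_fwnode_properties():
+ *
+ *   struct v4l2_fwnode_device_properties props;
+ *   int ret;
+ *
+ *   ret = v4l2_fwnode_device_parse(dev, &props);
+ *   if (ret)
+ *           return ret;
+ *   ret = v4l2_ctrl_new_fwnode_properties(&sensor->ctrls, &ctrl_ops,
+ *                                         &props);
+ */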
+
+/*
+ * v4l2_fwnode_reference_parse - parse references for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ * @prop: the name of the property
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries were found
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int v4l2_fwnode_reference_parse(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const char *prop)
+{
+ struct fwnode_reference_args args;
+ unsigned int index;
+ int ret;
+
+ for (index = 0;
+ !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop,
+ NULL, 0, index, &args));
+ index++) {
+ struct v4l2_async_connection *asd;
+
+ asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(asd)) {
+ /* not an error if asd already exists */
+ if (PTR_ERR(asd) == -EEXIST)
+ continue;
+
+ return PTR_ERR(asd);
+ }
+ }
+
+ /* -ENOENT here means successful parsing */
+ if (ret != -ENOENT)
+ return ret;
+
+ /* Return -ENOENT if no references were found */
+ return index ? 0 : -ENOENT;
+}
+
+/*
+ * v4l2_fwnode_reference_get_int_prop - parse a reference with integer
+ * arguments
+ * @fwnode: fwnode to read @prop from
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @index: the index of the reference to get
+ * @props: the array of integer property names
+ * @nprops: the number of integer property names in @props
+ *
+ * First find an fwnode referred to by the reference at @index in @prop.
+ *
+ * Then under that fwnode, @nprops times, for each property in @props,
+ * iteratively follow child nodes starting from fwnode such that they have the
+ * property in @props array at the index of the child node distance from the
+ * root node and the value of that property matching with the integer argument
+ * of the reference, at the same index.
+ *
+ * The child fwnode reached at the end of the iteration is then returned to the
+ * caller.
+ *
+ * The core reason for this is that you cannot refer to just any node in ACPI.
+ * So to refer to an endpoint (easy in DT) you need to refer to a device, then
+ * provide a list of (property name, property value) tuples where each tuple
+ * uniquely identifies a child node. The first tuple identifies a child directly
+ * underneath the device fwnode, the next tuple identifies a child node
+ * underneath the fwnode identified by the previous tuple, etc. until you
+ * reached the fwnode you need.
+ *
+ * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A
+ * REFERENCE FOR HOW ACPI TABLES SHOULD BE WRITTEN!! See the documentation under
+ * Documentation/firmware-guide/acpi/dsd/ instead, especially graph.txt,
+ * data-node-references.txt and leds.txt.
+ *
+ * Scope (\_SB.PCI0.I2C2)
+ * {
+ * Device (CAM0)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {
+ * "compatible",
+ * Package () { "nokia,smia" }
+ * },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port0", "PRT0" },
+ * }
+ * })
+ * Name (PRT0, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 0 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP00" },
+ * }
+ * })
+ * Name (EP00, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package() {
+ * \_SB.PCI0.ISP, 4, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * Scope (\_SB.PCI0)
+ * {
+ * Device (ISP)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port4", "PRT4" },
+ * }
+ * })
+ *
+ * Name (PRT4, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 4 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP40" },
+ * }
+ * })
+ *
+ * Name (EP40, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package () {
+ * \_SB.PCI0.I2C2.CAM0,
+ * 0, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * From the EP40 node under ISP device, you could parse the graph remote
+ * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
+ *
+ * @fwnode: fwnode referring to EP40 under ISP.
+ * @prop: "remote-endpoint"
+ * @index: 0
+ * @props: "port", "endpoint"
+ * @nprops: 2
+ *
+ * And you'd get back fwnode referring to EP00 under CAM0.
+ *
+ * The same works the other way around: if you use EP00 under CAM0 as the
+ * fwnode, you'll get fwnode referring to EP40 under ISP.
+ *
+ * The same example in DT syntax would look like this:
+ *
+ * cam: cam0 {
+ * compatible = "nokia,smia";
+ *
+ * port {
+ * port = <0>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&isp 4 0>;
+ * };
+ * };
+ * };
+ *
+ * isp: isp {
+ * ports {
+ * port@4 {
+ * port = <4>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&cam 0 0>;
+ * };
+ * };
+ * };
+ * };
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static struct fwnode_handle *
+v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode,
+ const char *prop,
+ unsigned int index,
+ const char * const *props,
+ unsigned int nprops)
+{
+ struct fwnode_reference_args fwnode_args;
+ u64 *args = fwnode_args.args;
+ struct fwnode_handle *child;
+ int ret;
+
+ /*
+ * Obtain remote fwnode as well as the integer arguments.
+ *
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return -ENOENT in that case.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
+ index, &fwnode_args);
+ if (ret)
+ return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
+
+ /*
+ * Find a node in the tree under the referred fwnode corresponding to
+ * the integer arguments.
+ */
+ fwnode = fwnode_args.fwnode;
+ while (nprops--) {
+ u32 val;
+
+ /* Loop over all child nodes under fwnode. */
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_property_read_u32(child, *props, &val))
+ continue;
+
+ /* Found property, see if its value matches. */
+ if (val == *args)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ /* No property found; return an error here. */
+ if (!child) {
+ fwnode = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ props++;
+ args++;
+ fwnode = child;
+ }
+
+ return fwnode;
+}
+
+struct v4l2_fwnode_int_props {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+};
+
+/*
+ * v4l2_fwnode_reference_parse_int_props - parse references for async
+ * sub-devices
+ * @dev: struct device pointer
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @props: the array of integer property names
+ * @nprops: the number of integer properties
+ *
+ * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through references in
+ * property @prop with integer arguments, matching child nodes on the properties
+ * in @props. Then set up V4L2 async sub-devices for those fwnodes in the
+ * notifier accordingly.
+ *
+ * While it is technically possible to use this function on DT, it is only
+ * meaningful on ACPI. In Device Tree you can refer to any node in the tree,
+ * but in ACPI the references are limited to devices.
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static int
+v4l2_fwnode_reference_parse_int_props(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const struct v4l2_fwnode_int_props *p)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int index;
+ int ret;
+ const char *prop = p->name;
+ const char * const *props = p->props;
+ unsigned int nprops = p->nprops;
+
+ index = 0;
+ do {
+ fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
+ prop, index,
+ props, nprops);
+ if (IS_ERR(fwnode)) {
+ /*
+ * Note that right now both -ENODATA and -ENOENT may
+ * signal out-of-bounds access. Return the error in
+ * cases other than that.
+ */
+ if (PTR_ERR(fwnode) != -ENOENT &&
+ PTR_ERR(fwnode) != -ENODATA)
+ return PTR_ERR(fwnode);
+ break;
+ }
+ fwnode_handle_put(fwnode);
+ index++;
+ } while (1);
+
+ for (index = 0;
+ !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
+ prop, index,
+ props,
+ nprops)));
+ index++) {
+ struct v4l2_async_connection *asd;
+
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* not an error if asd already exists */
+ if (ret == -EEXIST)
+ continue;
+
+ return PTR_ERR(asd);
+ }
+ }
+
+ return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+}
+
+/**
+ * v4l2_async_nf_parse_fwnode_sensor - parse common references on
+ * sensors for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ *
+ * Parse common sensor properties for remote devices related to the
+ * sensor and set up async sub-devices for them.
+ *
+ * Any notifier populated using this function must be released with a call to
+ * v4l2_async_nf_cleanup() after it has been unregistered and the async
+ * sub-devices are no longer in use, even if this function returned an
+ * error.
+ *
+ * Return: 0 on success
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int
+v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
+ struct v4l2_async_notifier *notifier)
+{
+ static const char * const led_props[] = { "led" };
+ static const struct v4l2_fwnode_int_props props[] = {
+ { "flash-leds", led_props, ARRAY_SIZE(led_props) },
+ { "mipi-img-flash-leds", },
+ { "lens-focus", },
+ { "mipi-img-lens-focus", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (props[i].props && is_acpi_node(dev_fwnode(dev)))
+ ret = v4l2_fwnode_reference_parse_int_props(dev,
+ notifier,
+ &props[i]);
+ else
+ ret = v4l2_fwnode_reference_parse(dev, notifier,
+ props[i].name);
+ if (ret && ret != -ENOENT) {
+ dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
+ props[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+ int ret;
+
+ if (WARN_ON(!sd->dev))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ v4l2_async_subdev_nf_init(notifier, sd);
+
+ ret = v4l2_subdev_get_privacy_led(sd);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_nf_register(notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_nf_unregister(notifier);
+
+out_cleanup:
+ v4l2_subdev_put_privacy_led(sd);
+ v4l2_async_nf_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor);
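+
+/*
+ * Usage note (sketch only; "sensor" is the driver's own context): sensor
+ * drivers call this instead of plain v4l2_async_register_subdev() at the
+ * end of probe(), once controls and pads are set up, so that lens-focus
+ * and flash-leds references are picked up automatically:
+ *
+ *   ret = v4l2_async_register_subdev_sensor(&sensor->sd);
+ *   if (ret)
+ *           goto err_cleanup;
+ */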
+
+MODULE_DESCRIPTION("V4L2 fwnode binding parsing library");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
new file mode 100644
index 000000000000..c00197d095e7
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-h264.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 H264 helpers.
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Author: Boris Brezillon <boris.brezillon@collabora.com>
+ */
+
+#include <linux/module.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-h264.h>
+
+/*
+ * Size of the temporary buffer allocated when printing reference lists. The
+ * output will be truncated if the size is too small.
+ */
+static const int tmp_str_size = 1024;
+
+/**
+ * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list
+ * builder
+ *
+ * @b: the builder context to initialize
+ * @dec_params: decode parameters control
+ * @sps: SPS control
+ * @dpb: DPB to use when creating the reference list
+ */
+void
+v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_ctrl_h264_decode_params *dec_params,
+ const struct v4l2_ctrl_h264_sps *sps,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int cur_frame_num, max_frame_num;
+ unsigned int i;
+
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = dec_params->frame_num;
+
+ memset(b, 0, sizeof(*b));
+ if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) {
+ b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt,
+ dec_params->top_field_order_cnt);
+ b->cur_pic_fields = V4L2_H264_FRAME_REF;
+ } else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) {
+ b->cur_pic_order_count = dec_params->bottom_field_order_cnt;
+ b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF;
+ } else {
+ b->cur_pic_order_count = dec_params->top_field_order_cnt;
+ b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF;
+ }
+
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ b->refs[i].longterm = true;
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * For long term references, frame_num is set to
+ * long_term_frame_idx which requires no wrapping.
+ */
+ if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num)
+ b->refs[i].frame_num = (int)dpb[i].frame_num -
+ max_frame_num;
+ else
+ b->refs[i].frame_num = dpb[i].frame_num;
+
+ b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt;
+ b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt;
+
+ if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
+ u8 fields = V4L2_H264_FRAME_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ continue;
+ }
+
+ if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) {
+ u8 fields = V4L2_H264_TOP_FIELD_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ }
+
+ if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) {
+ u8 fields = V4L2_H264_BOTTOM_FIELD_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ }
+ }
+
+ for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++)
+ b->unordered_reflist[i].index = i;
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder);
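+
+/*
+ * Usage sketch (illustrative; the control pointers and reference list
+ * storage are the driver's own): a stateless H264 decoder driver rebuilds
+ * the reference lists for every decode request:
+ *
+ *   struct v4l2_h264_reflist_builder b;
+ *   struct v4l2_h264_reference reflist[V4L2_H264_REF_LIST_LEN];
+ *
+ *   v4l2_h264_init_reflist_builder(&b, dec_params, sps, dec_params->dpb);
+ *   v4l2_h264_build_p_ref_list(&b, reflist);
+ *   // program reflist[0..b.num_valid - 1] into the hardware
+ */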
+
+static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_h264_reference *ref)
+{
+ switch (ref->fields) {
+ case V4L2_H264_FRAME_REF:
+ return min(b->refs[ref->index].top_field_order_cnt,
+ b->refs[ref->index].bottom_field_order_cnt);
+ case V4L2_H264_TOP_FIELD_REF:
+ return b->refs[ref->index].top_field_order_cnt;
+ case V4L2_H264_BOTTOM_FIELD_REF:
+ return b->refs[ref->index].bottom_field_order_cnt;
+ }
+
+ /* not reached */
+ return 0;
+}
+
+static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /*
+ * For frames, short term pics are in descending pic num order and long
+ * term ones in ascending order. For fields, the same ordering is used
+ * but with frame_num (wrapped). For frames, the values of pic_num and
+ * frame_num are the same (see formulas (8-28) and (8-29)). For this
+ * reason we can use frame_num only and share this function between
+ * frame and field reflists.
+ */
+ if (!builder->refs[idxa].longterm)
+ return builder->refs[idxb].frame_num <
+ builder->refs[idxa].frame_num ?
+ -1 : 1;
+
+ return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ?
+ -1 : 1;
+}
+
+static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending frame num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].frame_num <
+ builder->refs[idxb].frame_num ?
+ -1 : 1;
+
+ poca = v4l2_h264_get_poc(builder, ptra);
+ pocb = v4l2_h264_get_poc(builder, ptrb);
+
+ /*
+ * Short term pics with POC < cur POC first in POC descending order
+ * followed by short term pics with POC > cur POC in POC ascending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return poca < pocb ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
+
+static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending frame num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].frame_num <
+ builder->refs[idxb].frame_num ?
+ -1 : 1;
+
+ poca = v4l2_h264_get_poc(builder, ptra);
+ pocb = v4l2_h264_get_poc(builder, ptrb);
+
+ /*
+ * Short term pics with POC > cur POC first in POC ascending order
+ * followed by short term pics with POC < cur POC in POC descending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return pocb < poca ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
+
+/*
+ * The reference list needs to be reordered so that it alternates between
+ * top and bottom field references, starting with the current picture's
+ * parity. This has to be done separately for short term and long term
+ * references.
+ */
+static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b,
+ struct v4l2_h264_reference *reflist)
+{
+ struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN];
+ u8 lt, i = 0, j = 0, k = 0;
+
+ memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid);
+
+ for (lt = 0; lt <= 1; lt++) {
+ do {
+ for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) {
+ if (tmplist[i].fields == b->cur_pic_fields) {
+ reflist[k++] = tmplist[i++];
+ break;
+ }
+ }
+
+ for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) {
+ if (tmplist[j].fields != b->cur_pic_fields) {
+ reflist[k++] = tmplist[j++];
+ break;
+ }
+ }
+ } while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) ||
+ (j < b->num_valid && b->refs[tmplist[j].index].longterm == lt));
+ }
+}
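
A small invented example of what this interleaving produces (entries written
as <parity><frame_num>): if the current picture is a bottom field and the
sorted short term references are T8, B8, T6, B6, the reordered list becomes
B8, T8, B6, T6, i.e. references of the current parity lead and the two
parities alternate; the long term tail is interleaved the same way.
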
+
+static char ref_type_to_char(u8 ref_type)
+{
+ switch (ref_type) {
+ case V4L2_H264_FRAME_REF:
+ return 'f';
+ case V4L2_H264_TOP_FIELD_REF:
+ return 't';
+ case V4L2_H264_BOTTOM_FIELD_REF:
+ return 'b';
+ }
+
+ return '?';
+}
+
+static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist,
+ char **out_str)
+{
+ int n = 0, i;
+
+ *out_str = kmalloc(tmp_str_size, GFP_KERNEL);
+ if (!(*out_str))
+ return NULL;
+
+ n += snprintf(*out_str + n, tmp_str_size - n, "|");
+
+ for (i = 0; i < builder->num_valid; i++) {
+ /* this is pic_num for frame and frame_num (wrapped) for field,
+ * but for frame pic_num is equal to frame_num (wrapped).
+ */
+ int frame_num = builder->refs[reflist[i].index].frame_num;
+ bool longterm = builder->refs[reflist[i].index].longterm;
+
+ n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
+ frame_num, longterm ? 'l' : 's',
+ ref_type_to_char(reflist[i].fields));
+ }
+
+ return *out_str;
+}
+
+static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist)
+{
+ char *buf = NULL;
+
+ pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n",
+ builder->cur_pic_order_count,
+ ref_type_to_char(builder->cur_pic_fields),
+ format_ref_list_p(builder, reflist, &buf));
+
+ kfree(buf);
+}
+
+static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist,
+ char **out_str)
+{
+ int n = 0, i;
+
+ *out_str = kmalloc(tmp_str_size, GFP_KERNEL);
+ if (!(*out_str))
+ return NULL;
+
+ n += snprintf(*out_str + n, tmp_str_size - n, "|");
+
+ for (i = 0; i < builder->num_valid; i++) {
+ int frame_num = builder->refs[reflist[i].index].frame_num;
+ u32 poc = v4l2_h264_get_poc(builder, reflist + i);
+ bool longterm = builder->refs[reflist[i].index].longterm;
+
+ n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
+ longterm ? frame_num : poc,
+ longterm ? 'l' : 's',
+ ref_type_to_char(reflist[i].fields));
+ }
+
+ return *out_str;
+}
+
+static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist, u8 list_num)
+{
+ char *buf = NULL;
+
+ pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s",
+ list_num, builder->cur_pic_order_count,
+ ref_type_to_char(builder->cur_pic_fields),
+ format_ref_list_b(builder, reflist, &buf));
+
+ kfree(buf);
+}
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 32-entry array used to store the P reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the P reference list. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist)
+{
+ memcpy(reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(reflist, builder->num_valid, sizeof(*reflist),
+ v4l2_h264_p_ref_list_cmp, NULL, builder);
+
+ if (builder->cur_pic_fields != V4L2_H264_FRAME_REF)
+ reorder_field_reflist(builder, reflist);
+
+ print_ref_list_p(builder, reflist);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 32-entry array used to store the B0 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ * @b1_reflist: 32-entry array used to store the B1 reference list. Each entry
+ * is a v4l2_h264_reference structure
+ *
+ * This function builds the B0/B1 reference lists. This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware.
+ */
+void
+v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *b0_reflist,
+ struct v4l2_h264_reference *b1_reflist)
+{
+ memcpy(b0_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
+ v4l2_h264_b0_ref_list_cmp, NULL, builder);
+
+ memcpy(b1_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
+ v4l2_h264_b1_ref_list_cmp, NULL, builder);
+
+ if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) {
+ reorder_field_reflist(builder, b0_reflist);
+ reorder_field_reflist(builder, b1_reflist);
+ }
+
+ if (builder->num_valid > 1 &&
+ !memcmp(b1_reflist, b0_reflist, builder->num_valid))
+ swap(b1_reflist[0], b1_reflist[1]);
+
+ print_ref_list_b(builder, b0_reflist, 0);
+ print_ref_list_b(builder, b1_reflist, 1);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists);
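
For context, a minimal sketch (not part of this patch) of how a stateless
H.264 decoder driver typically consumes the two helpers above; the "foo"
names are placeholders and the decode-params/SPS controls are assumed to have
been fetched already:

#include <linux/printk.h>
#include <media/v4l2-h264.h>

static void foo_build_reflists(const struct v4l2_ctrl_h264_decode_params *dec_params,
                               const struct v4l2_ctrl_h264_sps *sps)
{
        struct v4l2_h264_reflist_builder builder;
        struct v4l2_h264_reference p_list[V4L2_H264_REF_LIST_LEN];
        struct v4l2_h264_reference b0_list[V4L2_H264_REF_LIST_LEN];
        struct v4l2_h264_reference b1_list[V4L2_H264_REF_LIST_LEN];

        /* Scan the DPB and record POC/frame_num metadata for every entry. */
        v4l2_h264_init_reflist_builder(&builder, dec_params, sps,
                                       dec_params->dpb);

        /* Produce the sorted lists that get programmed into the hardware. */
        v4l2_h264_build_p_ref_list(&builder, p_list);
        v4l2_h264_build_b_ref_lists(&builder, b0_list, b1_list);

        pr_debug("built reference lists with %u valid entries\n",
                 builder.num_valid);
}
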
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 H264 Helpers");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
new file mode 100644
index 000000000000..ffc64e10fcae
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-i2c - I2C helpers for Video4Linux2
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /*
+ * We need to unregister the i2c client
+ * explicitly. We cannot rely on
+ * i2c_del_adapter to always unregister
+ * clients for us, since if the i2c bus is a
+ * platform bus, then it is never deleted.
+ *
+ * Device tree or ACPI based devices must not
+ * be unregistered as they have not been
+ * registered by us, and would not be
+ * re-created by just probing the V4L2 driver.
+ */
+ if (client && !dev_fwnode(&client->dev))
+ i2c_unregister_device(client);
+}
+
+void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd,
+ struct i2c_client *client,
+ const char *devname, const char *postfix)
+{
+ if (!devname)
+ devname = client->dev.driver->name;
+ if (!postfix)
+ postfix = "";
+
+ snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
+ i2c_adapter_id(client->adapter), client->addr);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
+
+void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
+ /* the owner is the same as the i2c_client's driver owner */
+ sd->owner = client->dev.driver->owner;
+ sd->dev = &client->dev;
+ /* i2c_client and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, client);
+ i2c_set_clientdata(client, sd);
+ v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
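
For context, a minimal (hypothetical) sensor probe showing the usual consumer
of v4l2_i2c_subdev_init(); the "foo" names are placeholders, error handling is
trimmed, and the single-argument i2c probe prototype of recent kernels is
assumed:

#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-subdev.h>

struct foo_sensor {
        struct v4l2_subdev sd;
        /* clocks, regulators, controls, ... */
};

static const struct v4l2_subdev_ops foo_subdev_ops; /* pad/video ops elided */

static int foo_probe(struct i2c_client *client)
{
        struct foo_sensor *foo;

        foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        /* Couples the subdev and the i2c_client and names the subdev
         * "<driver-name> <adapter-nr>-<addr>". */
        v4l2_i2c_subdev_init(&foo->sd, client, &foo_subdev_ops);

        return v4l2_async_register_subdev(&foo->sd);
}
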
+
+/* Load an i2c sub-device. */
+struct v4l2_subdev
+*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ if (!v4l2_dev)
+ return NULL;
+
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_scanned_device(adapter, info, probe_addrs,
+ NULL);
+ else
+ client = i2c_new_client_device(adapter, info);
+
+ /*
+ * Note: by loading the module first we are certain that the client's
+ * driver pointer will be set if the driver was found. If the module was
+ * not loaded first, then the i2c core tries to delay-load the module for
+ * us, and then client->dev.driver is still NULL until the module is finally
+ * loaded. This delay-load mechanism doesn't work if other drivers
+ * want to use the i2c device, so explicitly loading the module
+ * is the best alternative.
+ */
+ if (!i2c_client_has_driver(client))
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->dev.driver->owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->dev.driver->owner);
+
+error:
+ /*
+ * If we have a client but no subdev, then something went wrong and
+ * we must unregister the client.
+ */
+ if (!IS_ERR(client) && !sd)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *client_type,
+ u8 addr,
+ const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /*
+ * Set up the i2c board info with the device type and
+ * the device address.
+ */
+ memset(&info, 0, sizeof(info));
+ strscpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info,
+ probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
+
+/* Return i2c client address of v4l2_subdev. */
+unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return client ? client->addr : I2C_CLIENT_END;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
+
+/*
+ * Return a list of I2C tuner addresses to probe. Use only if the tuner
+ * addresses are unknown.
+ */
+const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
+{
+ static const unsigned short radio_addrs[] = {
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
+ 0x10,
+#endif
+ 0x60,
+ I2C_CLIENT_END
+ };
+ static const unsigned short demod_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b,
+ I2C_CLIENT_END
+ };
+ static const unsigned short tv_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
+ 0x60, 0x61, 0x62, 0x63, 0x64,
+ I2C_CLIENT_END
+ };
+
+ switch (type) {
+ case ADDRS_RADIO:
+ return radio_addrs;
+ case ADDRS_DEMOD:
+ return demod_addrs;
+ case ADDRS_TV:
+ return tv_addrs;
+ case ADDRS_TV_WITH_DEMOD:
+ return tv_addrs + 4;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
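
A short, hypothetical bridge-driver sketch of the usual way
v4l2_i2c_tuner_addrs() is combined with v4l2_i2c_new_subdev() to probe a tuner
whose address is not known up front (the "foo" structure is a placeholder):

#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

struct foo_dev {
        struct v4l2_device v4l2_dev;
        struct i2c_adapter *i2c_adap;
        struct v4l2_subdev *tuner;
};

static void foo_attach_radio_tuner(struct foo_dev *dev)
{
        /* addr == 0 plus a probe list makes the helper scan the bus. */
        dev->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, dev->i2c_adap,
                                         "tuner", 0,
                                         v4l2_i2c_tuner_addrs(ADDRS_RADIO));
        if (!dev->tuner)
                v4l2_warn(&dev->v4l2_dev, "no radio tuner found\n");
}
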
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/media/v4l2-core/v4l2-int-device.c
deleted file mode 100644
index f4473494af7a..000000000000
--- a/drivers/media/v4l2-core/v4l2-int-device.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * drivers/media/video/v4l2-int-device.c
- *
- * V4L2 internal ioctl interface.
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sort.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-#include <media/v4l2-int-device.h>
-
-static DEFINE_MUTEX(mutex);
-static LIST_HEAD(int_list);
-
-void v4l2_int_device_try_attach_all(void)
-{
- struct v4l2_int_device *m, *s;
-
- list_for_each_entry(m, &int_list, head) {
- if (m->type != v4l2_int_type_master)
- continue;
-
- list_for_each_entry(s, &int_list, head) {
- if (s->type != v4l2_int_type_slave)
- continue;
-
- /* Slave is connected? */
- if (s->u.slave->master)
- continue;
-
- /* Slave wants to attach to master? */
- if (s->u.slave->attach_to[0] != 0
- && strncmp(m->name, s->u.slave->attach_to,
- V4L2NAMESIZE))
- continue;
-
- if (!try_module_get(m->module))
- continue;
-
- s->u.slave->master = m;
- if (m->u.master->attach(s)) {
- s->u.slave->master = NULL;
- module_put(m->module);
- continue;
- }
- }
- }
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
-
-static int ioctl_sort_cmp(const void *a, const void *b)
-{
- const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;
-
- if (d1->num > d2->num)
- return 1;
-
- if (d1->num < d2->num)
- return -1;
-
- return 0;
-}
-
-int v4l2_int_device_register(struct v4l2_int_device *d)
-{
- if (d->type == v4l2_int_type_slave)
- sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
- sizeof(struct v4l2_int_ioctl_desc),
- &ioctl_sort_cmp, NULL);
- mutex_lock(&mutex);
- list_add(&d->head, &int_list);
- v4l2_int_device_try_attach_all();
- mutex_unlock(&mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_register);
-
-void v4l2_int_device_unregister(struct v4l2_int_device *d)
-{
- mutex_lock(&mutex);
- list_del(&d->head);
- if (d->type == v4l2_int_type_slave
- && d->u.slave->master != NULL) {
- d->u.slave->master->u.master->detach(d);
- module_put(d->u.slave->master->module);
- d->u.slave->master = NULL;
- }
- mutex_unlock(&mutex);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
-
-/* Adapted from search_extable in extable.c. */
-static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
- v4l2_int_ioctl_func *no_such_ioctl)
-{
- const struct v4l2_int_ioctl_desc *first = slave->ioctls;
- const struct v4l2_int_ioctl_desc *last =
- first + slave->num_ioctls - 1;
-
- while (first <= last) {
- const struct v4l2_int_ioctl_desc *mid;
-
- mid = (last - first) / 2 + first;
-
- if (mid->num < cmd)
- first = mid + 1;
- else if (mid->num > cmd)
- last = mid - 1;
- else
- return mid->func;
- }
-
- return no_such_ioctl;
-}
-
-static int no_such_ioctl_0(struct v4l2_int_device *d)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
-{
- return ((v4l2_int_ioctl_func_0 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
-
-static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
-{
- return ((v4l2_int_ioctl_func_1 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 68e6b5e912ff..98512ea4cc5b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1,38 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Video capture interface for Linux version 2
*
* A generic framework to process V4L2 ioctl commands.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
*/
+#include <linux/compat.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/version.h>
+#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
+#include <media/media-device.h> /* for media_set_bus_info() */
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-mem2mem.h>
-/* Zero out the end of the struct pointed to by p. Everything after, but
- * not including, the specified field is cleared. */
-#define CLEAR_AFTER_FIELD(p, field) \
- memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
- 0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
+#include <trace/events/v4l2.h>
#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls)
@@ -42,37 +40,37 @@ struct std_descr {
};
static const struct std_descr standards[] = {
- { V4L2_STD_NTSC, "NTSC" },
- { V4L2_STD_NTSC_M, "NTSC-M" },
- { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" },
+ { V4L2_STD_NTSC, "NTSC" },
+ { V4L2_STD_NTSC_M, "NTSC-M" },
+ { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" },
{ V4L2_STD_NTSC_M_KR, "NTSC-M-KR" },
- { V4L2_STD_NTSC_443, "NTSC-443" },
- { V4L2_STD_PAL, "PAL" },
- { V4L2_STD_PAL_BG, "PAL-BG" },
- { V4L2_STD_PAL_B, "PAL-B" },
- { V4L2_STD_PAL_B1, "PAL-B1" },
- { V4L2_STD_PAL_G, "PAL-G" },
- { V4L2_STD_PAL_H, "PAL-H" },
- { V4L2_STD_PAL_I, "PAL-I" },
- { V4L2_STD_PAL_DK, "PAL-DK" },
- { V4L2_STD_PAL_D, "PAL-D" },
- { V4L2_STD_PAL_D1, "PAL-D1" },
- { V4L2_STD_PAL_K, "PAL-K" },
- { V4L2_STD_PAL_M, "PAL-M" },
- { V4L2_STD_PAL_N, "PAL-N" },
- { V4L2_STD_PAL_Nc, "PAL-Nc" },
- { V4L2_STD_PAL_60, "PAL-60" },
- { V4L2_STD_SECAM, "SECAM" },
- { V4L2_STD_SECAM_B, "SECAM-B" },
- { V4L2_STD_SECAM_G, "SECAM-G" },
- { V4L2_STD_SECAM_H, "SECAM-H" },
- { V4L2_STD_SECAM_DK, "SECAM-DK" },
- { V4L2_STD_SECAM_D, "SECAM-D" },
- { V4L2_STD_SECAM_K, "SECAM-K" },
- { V4L2_STD_SECAM_K1, "SECAM-K1" },
- { V4L2_STD_SECAM_L, "SECAM-L" },
- { V4L2_STD_SECAM_LC, "SECAM-Lc" },
- { 0, "Unknown" }
+ { V4L2_STD_NTSC_443, "NTSC-443" },
+ { V4L2_STD_PAL, "PAL" },
+ { V4L2_STD_PAL_BG, "PAL-BG" },
+ { V4L2_STD_PAL_B, "PAL-B" },
+ { V4L2_STD_PAL_B1, "PAL-B1" },
+ { V4L2_STD_PAL_G, "PAL-G" },
+ { V4L2_STD_PAL_H, "PAL-H" },
+ { V4L2_STD_PAL_I, "PAL-I" },
+ { V4L2_STD_PAL_DK, "PAL-DK" },
+ { V4L2_STD_PAL_D, "PAL-D" },
+ { V4L2_STD_PAL_D1, "PAL-D1" },
+ { V4L2_STD_PAL_K, "PAL-K" },
+ { V4L2_STD_PAL_M, "PAL-M" },
+ { V4L2_STD_PAL_N, "PAL-N" },
+ { V4L2_STD_PAL_Nc, "PAL-Nc" },
+ { V4L2_STD_PAL_60, "PAL-60" },
+ { V4L2_STD_SECAM, "SECAM" },
+ { V4L2_STD_SECAM_B, "SECAM-B" },
+ { V4L2_STD_SECAM_G, "SECAM-G" },
+ { V4L2_STD_SECAM_H, "SECAM-H" },
+ { V4L2_STD_SECAM_DK, "SECAM-DK" },
+ { V4L2_STD_SECAM_D, "SECAM-D" },
+ { V4L2_STD_SECAM_K, "SECAM-K" },
+ { V4L2_STD_SECAM_K1, "SECAM-K1" },
+ { V4L2_STD_SECAM_L, "SECAM-L" },
+ { V4L2_STD_SECAM_LC, "SECAM-Lc" },
+ { 0, "Unknown" }
};
/* video4linux standard ID conversion to standard name
@@ -83,7 +81,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id)
int i;
/* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
- 64 bit comparations. So, on that architecture, with some gcc
+ 64 bit comparisons. So, on that architecture, with some gcc
variants, compilation fails. Currently, the max value is 30bit wide.
*/
BUG_ON(myid != id);
@@ -116,11 +114,47 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
vs->id = id;
v4l2_video_std_frame_period(id, &vs->frameperiod);
vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
- strlcpy(vs->name, name, sizeof(vs->name));
+ strscpy(vs->name, name, sizeof(vs->name));
return 0;
}
EXPORT_SYMBOL(v4l2_video_std_construct);
+/* Fill in the fields of a v4l2_standard structure according to the
+ * 'id' and 'vs->index' parameters. Returns negative on error. */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id)
+{
+ v4l2_std_id curr_id = 0;
+ unsigned int index = vs->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return -ENODATA if the id for the current input
+ or output is 0, meaning that it doesn't support this API. */
+ if (id == 0)
+ return -ENODATA;
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+ /* last std value in the standards array is 0, so this
+ while always ends there since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
+ j++;
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
+ break;
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
+
+ v4l2_video_std_construct(vs, curr_id, descr);
+ return 0;
+}
+
/* ----------------------------------------------------------------- */
/* some arrays for pretty-printing debug messages of enum types */
@@ -139,6 +173,7 @@ const char *v4l2_field_names[] = {
EXPORT_SYMBOL(v4l2_field_names);
const char *v4l2_type_names[] = {
+ [0] = "0",
[V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap",
[V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay",
[V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out",
@@ -149,6 +184,10 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
+ [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
+ [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
+ [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap",
+ [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -168,8 +207,7 @@ static void v4l_print_querycap(const void *arg, bool write_only)
{
const struct v4l2_capability *p = arg;
- pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
- "capabilities=0x%08x, device_caps=0x%08x\n",
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
(int)sizeof(p->driver), p->driver,
(int)sizeof(p->card), p->card,
(int)sizeof(p->bus_info), p->bus_info,
@@ -180,8 +218,7 @@ static void v4l_print_enuminput(const void *arg, bool write_only)
{
const struct v4l2_input *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
- "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->tuner, (unsigned long long)p->std, p->status,
p->capabilities);
@@ -191,8 +228,7 @@ static void v4l_print_enumoutput(const void *arg, bool write_only)
{
const struct v4l2_output *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
- "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->modulator, (unsigned long long)p->std, p->capabilities);
}
@@ -225,12 +261,9 @@ static void v4l_print_fmtdesc(const void *arg, bool write_only)
{
const struct v4l2_fmtdesc *p = arg;
- pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%.*s'\n",
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n",
p->index, prt_names(p->type, v4l2_type_names),
- p->flags, (p->pixelformat & 0xff),
- (p->pixelformat >> 8) & 0xff,
- (p->pixelformat >> 16) & 0xff,
- (p->pixelformat >> 24) & 0xff,
+ p->flags, &p->pixelformat, p->mbus_code,
(int)sizeof(p->description), p->description);
}
@@ -242,6 +275,9 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
+ const struct v4l2_meta_format *meta;
+ u32 pixelformat;
+ u32 planes;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -249,32 +285,24 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &p->fmt.pix;
- pr_cont(", width=%u, height=%u, "
- "pixelformat=%c%c%c%c, field=%s, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
- pix->width, pix->height,
- (pix->pixelformat & 0xff),
- (pix->pixelformat >> 8) & 0xff,
- (pix->pixelformat >> 16) & 0xff,
- (pix->pixelformat >> 24) & 0xff,
+ pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ pix->width, pix->height, &pix->pixelformat,
prt_names(pix->field, v4l2_field_names),
pix->bytesperline, pix->sizeimage,
- pix->colorspace);
+ pix->colorspace, pix->flags, pix->ycbcr_enc,
+ pix->quantization, pix->xfer_func);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
- pr_cont(", width=%u, height=%u, "
- "format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%u\n",
- mp->width, mp->height,
- (mp->pixelformat & 0xff),
- (mp->pixelformat >> 8) & 0xff,
- (mp->pixelformat >> 16) & 0xff,
- (mp->pixelformat >> 24) & 0xff,
+ pixelformat = mp->pixelformat;
+ pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ mp->width, mp->height, &pixelformat,
prt_names(mp->field, v4l2_field_names),
- mp->colorspace, mp->num_planes);
- for (i = 0; i < mp->num_planes; i++)
+ mp->colorspace, mp->num_planes, mp->flags,
+ mp->ycbcr_enc, mp->quantization, mp->xfer_func);
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+ for (i = 0; i < planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
mp->plane_fmt[i].sizeimage);
@@ -282,26 +310,17 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
- /* Note: we can't print the clip list here since the clips
- * pointer is a userspace pointer, not a kernelspace
- * pointer. */
- pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
- win->w.width, win->w.height, win->w.left, win->w.top,
+ pr_cont(", (%d,%d)/%ux%u, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
+ win->w.left, win->w.top, win->w.width, win->w.height,
prt_names(win->field, v4l2_field_names),
- win->chromakey, win->clipcount, win->clips,
- win->bitmap, win->global_alpha);
+ win->chromakey, win->global_alpha);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
vbi = &p->fmt.vbi;
- pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
- "sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n",
vbi->sampling_rate, vbi->offset,
- vbi->samples_per_line,
- (vbi->sample_format & 0xff),
- (vbi->sample_format >> 8) & 0xff,
- (vbi->sample_format >> 16) & 0xff,
- (vbi->sample_format >> 24) & 0xff,
+ vbi->samples_per_line, &vbi->sample_format,
vbi->start[0], vbi->start[1],
vbi->count[0], vbi->count[1]);
break;
@@ -315,6 +334,19 @@ static void v4l_print_format(const void *arg, bool write_only)
sliced->service_lines[0][i],
sliced->service_lines[1][i]);
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ pixelformat = p->fmt.sdr.pixelformat;
+ pr_cont(", pixelformat=%p4cc\n", &pixelformat);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ meta = &p->fmt.meta;
+ pixelformat = meta->dataformat;
+ pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n",
+ &pixelformat, meta->buffersize, meta->width,
+ meta->height, meta->bytesperline);
+ break;
}
}
@@ -322,17 +354,10 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
{
const struct v4l2_framebuffer *p = arg;
- pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
- "height=%u, pixelformat=%c%c%c%c, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
- p->capability, p->flags, p->base,
- p->fmt.width, p->fmt.height,
- (p->fmt.pixelformat & 0xff),
- (p->fmt.pixelformat >> 8) & 0xff,
- (p->fmt.pixelformat >> 16) & 0xff,
- (p->fmt.pixelformat >> 24) & 0xff,
- p->fmt.bytesperline, p->fmt.sizeimage,
- p->fmt.colorspace);
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ p->capability, p->flags, p->base, p->fmt.width, p->fmt.height,
+ &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage,
+ p->fmt.colorspace);
}
static void v4l_print_buftype(const void *arg, bool write_only)
@@ -347,8 +372,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
else
- pr_cont("index=%u, name=%.*s, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->capability,
p->rangelow, p->rangehigh, p->txsubchans);
}
@@ -360,9 +384,7 @@ static void v4l_print_tuner(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
else
- pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
- "rxsubchans=0x%x, audmode=%u\n",
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
p->index, (int)sizeof(p->name), p->name, p->type,
p->capability, p->rangelow,
p->rangehigh, p->signal, p->afc,
@@ -381,8 +403,8 @@ static void v4l_print_standard(const void *arg, bool write_only)
{
const struct v4l2_standard *p = arg;
- pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
- "framelines=%u\n", p->index,
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
+ p->index,
(unsigned long long)p->id, (int)sizeof(p->name), p->name,
p->frameperiod.numerator,
p->frameperiod.denominator,
@@ -398,8 +420,7 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
{
const struct v4l2_hw_freq_seek *p = arg;
- pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
- "rangelow=%u, rangehigh=%u\n",
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
p->rangelow, p->rangehigh);
}
@@ -421,14 +442,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
- "flags=0x%08x, field=%s, sequence=%d, memory=%s",
- p->timestamp.tv_sec / 3600,
- (int)(p->timestamp.tv_sec / 60) % 60,
- (int)(p->timestamp.tv_sec % 60),
+ pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ (int)p->timestamp.tv_sec / 3600,
+ ((int)p->timestamp.tv_sec / 60) % 60,
+ ((int)p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
- prt_names(p->type, v4l2_type_names),
+ prt_names(p->type, v4l2_type_names), p->request_fd,
p->flags, prt_names(p->field, v4l2_field_names),
p->sequence, prt_names(p->memory, v4l2_memory_names));
@@ -437,8 +457,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
- "plane %d: bytesused=%d, data_offset=0x%08x, "
- "offset/userptr=0x%lx, length=%d\n",
+ "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
@@ -447,8 +466,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
p->bytesused, p->m.userptr, p->length);
}
- printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
- "flags=0x%08x, frames=%d, userbits=0x%08x\n",
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
@@ -466,12 +484,20 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
- pr_cont("index=%d, count=%d, memory=%s, ",
- p->index, p->count,
- prt_names(p->memory, v4l2_memory_names));
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ",
+ p->index, p->count, prt_names(p->memory, v4l2_memory_names),
+ p->capabilities, p->max_num_buffers);
v4l_print_format(&p->format, write_only);
}
+static void v4l_print_remove_buffers(const void *arg, bool write_only)
+{
+ const struct v4l2_remove_buffers *p = arg;
+
+ pr_cont("type=%s, index=%u, count=%u\n",
+ prt_names(p->type, v4l2_type_names), p->index, p->count);
+}
+
static void v4l_print_streamparm(const void *arg, bool write_only)
{
const struct v4l2_streamparm *p = arg;
@@ -482,8 +508,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
const struct v4l2_captureparm *c = &p->parm.capture;
- pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, readbuffers=%d\n",
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
c->capability, c->capturemode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->readbuffers);
@@ -491,8 +516,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
const struct v4l2_outputparm *c = &p->parm.output;
- pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, writebuffers=%d\n",
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
@@ -505,13 +529,24 @@ static void v4l_print_queryctrl(const void *arg, bool write_only)
{
const struct v4l2_queryctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
- "step=%d, default=%d, flags=0x%08x\n",
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags);
}
+static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_query_ext_ctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+ p->id, p->type, (int)sizeof(p->name), p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags,
+ p->elem_size, p->elems, p->nr_of_dims,
+ p->dims[0], p->dims[1], p->dims[2], p->dims[3]);
+}
+
static void v4l_print_querymenu(const void *arg, bool write_only)
{
const struct v4l2_querymenu *p = arg;
@@ -522,7 +557,10 @@ static void v4l_print_querymenu(const void *arg, bool write_only)
static void v4l_print_control(const void *arg, bool write_only)
{
const struct v4l2_control *p = arg;
+ const char *name = v4l2_ctrl_get_name(p->id);
+ if (name)
+ pr_cont("name=%s, ", name);
pr_cont("id=0x%x, value=%d\n", p->id, p->value);
}
@@ -531,15 +569,18 @@ static void v4l_print_ext_controls(const void *arg, bool write_only)
const struct v4l2_ext_controls *p = arg;
int i;
- pr_cont("class=0x%x, count=%d, error_idx=%d",
- p->ctrl_class, p->count, p->error_idx);
+ pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
+ p->which, p->count, p->error_idx, p->request_fd);
for (i = 0; i < p->count; i++) {
- if (p->controls[i].size)
- pr_cont(", id/val=0x%x/0x%x",
- p->controls[i].id, p->controls[i].value);
+ unsigned int id = p->controls[i].id;
+ const char *name = v4l2_ctrl_get_name(id);
+
+ if (name)
+ pr_cont(", name=%s", name);
+ if (!p->controls[i].size)
+ pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value);
else
- pr_cont(", id/size=0x%x/%u",
- p->controls[i].id, p->controls[i].size);
+ pr_cont(", id/size=0x%x/%u", id, p->controls[i].size);
}
pr_cont("\n");
}
@@ -548,14 +589,12 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
- pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
- "defrect wxh=%dx%d, x,y=%d,%d\n, "
- "pixelaspect %d/%d\n",
+ pr_cont("type=%s, bounds (%d,%d)/%ux%u, defrect (%d,%d)/%ux%u, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
- p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
- p->defrect.width, p->defrect.height,
+ p->bounds.width, p->bounds.height,
p->defrect.left, p->defrect.top,
+ p->defrect.width, p->defrect.height,
p->pixelaspect.numerator, p->pixelaspect.denominator);
}
@@ -563,28 +602,27 @@ static void v4l_print_crop(const void *arg, bool write_only)
{
const struct v4l2_crop *p = arg;
- pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, crop=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
- p->c.width, p->c.height,
- p->c.left, p->c.top);
+ p->c.left, p->c.top,
+ p->c.width, p->c.height);
}
static void v4l_print_selection(const void *arg, bool write_only)
{
const struct v4l2_selection *p = arg;
- pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, target=%d, flags=0x%x, rect=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
p->target, p->flags,
- p->r.width, p->r.height, p->r.left, p->r.top);
+ p->r.left, p->r.top, p->r.width, p->r.height);
}
static void v4l_print_jpegcompression(const void *arg, bool write_only)
{
const struct v4l2_jpegcompression *p = arg;
- pr_cont("quality=%d, APPn=%d, APP_len=%d, "
- "COM_len=%d, jpeg_markers=0x%x\n",
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
p->quality, p->APPn, p->APP_len,
p->COM_len, p->jpeg_markers);
}
@@ -651,14 +689,7 @@ static void v4l_print_dv_timings(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, interlaced=%u, "
- "pixelclock=%llu, "
- "width=%u, height=%u, polarities=0x%x, "
- "hfrontporch=%u, hsync=%u, "
- "hbackporch=%u, vfrontporch=%u, "
- "vsync=%u, vbackporch=%u, "
- "il_vfrontporch=%u, il_vsync=%u, "
- "il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+ pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
p->bt.interlaced, p->bt.pixelclock,
p->bt.width, p->bt.height,
p->bt.polarities, p->bt.hfrontporch,
@@ -688,8 +719,7 @@ static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
- "pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
p->bt.min_width, p->bt.max_width,
p->bt.min_height, p->bt.max_height,
p->bt.min_pixelclock, p->bt.max_pixelclock,
@@ -705,13 +735,8 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
{
const struct v4l2_frmsizeenum *p = arg;
- pr_cont("index=%u, pixelformat=%c%c%c%c, type=%u",
- p->index,
- (p->pixel_format & 0xff),
- (p->pixel_format >> 8) & 0xff,
- (p->pixel_format >> 16) & 0xff,
- (p->pixel_format >> 24) & 0xff,
- p->type);
+ pr_cont("index=%u, pixelformat=%p4cc, type=%u",
+ p->index, &p->pixel_format, p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
pr_cont(", wxh=%ux%u\n",
@@ -719,12 +744,14 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
- p->stepwise.min_width, p->stepwise.min_height,
- p->stepwise.step_width, p->stepwise.step_height,
- p->stepwise.max_width, p->stepwise.max_height);
+ p->stepwise.min_width,
+ p->stepwise.min_height,
+ p->stepwise.max_width,
+ p->stepwise.max_height,
+ p->stepwise.step_width,
+ p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
@@ -735,13 +762,8 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
const struct v4l2_frmivalenum *p = arg;
- pr_cont("index=%u, pixelformat=%c%c%c%c, wxh=%ux%u, type=%u",
- p->index,
- (p->pixel_format & 0xff),
- (p->pixel_format >> 8) & 0xff,
- (p->pixel_format >> 16) & 0xff,
- (p->pixel_format >> 24) & 0xff,
- p->width, p->height, p->type);
+ pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u",
+ p->index, &p->pixel_format, p->width, p->height, p->type);
switch (p->type) {
case V4L2_FRMIVAL_TYPE_DISCRETE:
pr_cont(", fps=%d/%d\n",
@@ -758,7 +780,6 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
p->stepwise.step.denominator);
break;
case V4L2_FRMIVAL_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
@@ -770,8 +791,7 @@ static void v4l_print_event(const void *arg, bool write_only)
const struct v4l2_event *p = arg;
const struct v4l2_event_ctrl *c;
- pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
- "timestamp=%lu.%9.9lu\n",
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
p->type, p->pending, p->sequence, p->id,
p->timestamp.tv_sec, p->timestamp.tv_nsec);
switch (p->type) {
@@ -787,8 +807,7 @@ static void v4l_print_event(const void *arg, bool write_only)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
- pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
- "default_value=%d\n",
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
@@ -824,13 +843,20 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
{
const struct v4l2_frequency_band *p = arg;
- pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
}
+static void v4l_print_edid(const void *arg, bool write_only)
+{
+ const struct v4l2_edid *p = arg;
+
+ pr_cont("pad=%u, start_block=%u, blocks=%u\n",
+ p->pad, p->start_block, p->blocks);
+}
+
static void v4l_print_u32(const void *arg, bool write_only)
{
pr_cont("value=%u\n", *(const u32 *)arg);
@@ -846,38 +872,72 @@ static void v4l_print_default(const void *arg, bool write_only)
pr_cont("driver-specific ioctl\n");
}
-static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
__u32 i;
/* zero the reserved fields */
- c->reserved[0] = c->reserved[1] = 0;
+ c->reserved[0] = 0;
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
- /* V4L2_CID_PRIVATE_BASE cannot be used as control class
- when using extended controls.
- Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
- is it allowed for backwards compatibility.
- */
- if (!allow_priv && c->ctrl_class == V4L2_CID_PRIVATE_BASE)
- return 0;
+ switch (c->which) {
+ case V4L2_CID_PRIVATE_BASE:
+ /*
+ * V4L2_CID_PRIVATE_BASE cannot be used as control class
+ * when using extended controls.
+ * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ * is it allowed for backwards compatibility.
+ */
+ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
+ return false;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ /* Default, minimum or maximum value cannot be changed */
+ if (ioctl == VIDIOC_S_EXT_CTRLS ||
+ ioctl == VIDIOC_TRY_EXT_CTRLS) {
+ c->error_idx = c->count;
+ return false;
+ }
+ return true;
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return true;
+ case V4L2_CTRL_WHICH_REQUEST_VAL:
+ c->error_idx = c->count;
+ return false;
+ }
+
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
- if (V4L2_CTRL_ID2CLASS(c->controls[i].id) != c->ctrl_class) {
- c->error_idx = i;
- return 0;
+ if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
+ c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
+ c->count;
+ return false;
}
}
- return 1;
+ return true;
}
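
For reference, the userspace side of the "which" handling above, sketched
against a recent videodev2.h (the helper name is illustrative and fd is
assumed to be an open video node):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int get_current_value(int fd, unsigned int id, int *value)
{
        struct v4l2_ext_control ctrl = { .id = id };
        struct v4l2_ext_controls ctrls = {
                .which = V4L2_CTRL_WHICH_CUR_VAL,
                .count = 1,
                .controls = &ctrl,
        };

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
                return -1;

        *value = ctrl.value;
        return 0;
}
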
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -886,12 +946,12 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (is_vid && is_rx &&
+ if ((is_vid || is_tch) && is_rx &&
(ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
+ if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
@@ -927,62 +987,220 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
return 0;
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
+ return 0;
+ break;
default:
break;
}
return -EINVAL;
}
-static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace,
+ u32 *encoding, u32 *quantization,
+ u32 *xfer_func)
+{
+ bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 ||
+ pixelformat == V4L2_PIX_FMT_HSV32;
+
+ if (!v4l2_is_colorspace_valid(*colorspace)) {
+ *colorspace = V4L2_COLORSPACE_DEFAULT;
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ }
+
+ if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) ||
+ (is_hsv && !v4l2_is_hsv_enc_valid(*encoding)))
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+
+ if (!v4l2_is_quant_valid(*quantization))
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (!v4l2_is_xfer_func_valid(*xfer_func))
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void v4l_sanitize_format(struct v4l2_format *fmt)
+{
+ unsigned int offset;
+
+ /* Make sure num_planes is not bogus */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
+ VIDEO_MAX_PLANES);
+
+ /*
+ * The v4l2_pix_format structure has been extended with fields that were
+ * not previously required to be set to zero by applications. The priv
+ * field, when set to a magic value, indicates that the extended fields
+ * are valid. Otherwise they will contain undefined values. To simplify
+ * the API towards drivers zero the extended fields and set the priv
+ * field to the magic value when the extended pixel format structure
+ * isn't used by applications.
+ */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) {
+ fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ offset = offsetof(struct v4l2_pix_format, priv)
+ + sizeof(fmt->fmt.pix.priv);
+ memset(((void *)&fmt->fmt.pix) + offset, 0,
+ sizeof(fmt->fmt.pix) - offset);
+ }
+ }
+
+ /* Replace invalid colorspace values with defaults. */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat,
+ &fmt->fmt.pix.colorspace,
+ &fmt->fmt.pix.ycbcr_enc,
+ &fmt->fmt.pix.quantization,
+ &fmt->fmt.pix.xfer_func);
+ } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
+ u32 quantization = fmt->fmt.pix_mp.quantization;
+ u32 xfer_func = fmt->fmt.pix_mp.xfer_func;
+
+ v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat,
+ &fmt->fmt.pix_mp.colorspace, &ycbcr_enc,
+ &quantization, &xfer_func);
+
+ fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = quantization;
+ fmt->fmt.pix_mp.xfer_func = xfer_func;
+ }
+}
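
For reference, a userspace sketch of the priv handshake being sanitized above:
an application that wants the extended v4l2_pix_format fields honoured sets
priv to V4L2_PIX_FMT_PRIV_MAGIC before VIDIOC_S_FMT, otherwise the core zeroes
them (fd and the chosen values are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_extended_format(int fd)
{
        struct v4l2_format fmt;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width = 1280;
        fmt.fmt.pix.height = 720;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
        fmt.fmt.pix.field = V4L2_FIELD_NONE;

        /* Without the magic value the core ignores the fields below. */
        fmt.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
        fmt.fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
        fmt.fmt.pix.quantization = V4L2_QUANTIZATION_LIM_RANGE;

        return ioctl(fd, VIDIOC_S_FMT, &fmt);
}
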
+
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
+ struct video_device *vfd = video_devdata(file);
+ int ret;
cap->version = LINUX_VERSION_CODE;
- return ops->vidioc_querycap(file, fh, cap);
+ cap->device_caps = vfd->device_caps;
+ cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
+ vfd->dev_parent);
+
+ ret = ops->vidioc_querycap(file, NULL, cap);
+
+ /*
+ * Drivers must not change device_caps, so check for this and
+ * warn if this happened.
+ */
+ WARN_ON(cap->device_caps != vfd->device_caps);
+ /*
+ * Check that capabilities is a superset of
+ * vfd->device_caps | V4L2_CAP_DEVICE_CAPS
+ */
+ WARN_ON((cap->capabilities &
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) !=
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS));
+ cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+ cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
+
+ return ret;
+}
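
With the core now pre-filling device_caps, capabilities and bus_info as shown
above, a driver's querycap handler only needs to provide the identification
strings; a hypothetical sketch ("foo" names are placeholders):

#include <linux/string.h>
#include <media/v4l2-ioctl.h>

static int foo_querycap(struct file *file, void *priv,
                        struct v4l2_capability *cap)
{
        strscpy(cap->driver, "foo", sizeof(cap->driver));
        strscpy(cap->card, "Foo Video Capture", sizeof(cap->card));
        return 0;
}

static const struct v4l2_ioctl_ops foo_ioctl_ops = {
        .vidioc_querycap = foo_querycap,
        /* ... */
};
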
+
+static int v4l_g_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_input(file, NULL, arg);
}
-static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_output(file, NULL, arg);
+}
+
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
+ return ops->vidioc_s_input(file, NULL, *(unsigned int *)arg);
}
-static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
+ return ops->vidioc_s_output(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
u32 *p = arg;
- if (ops->vidioc_g_priority)
- return ops->vidioc_g_priority(file, fh, arg);
vfd = video_devdata(file);
- *p = v4l2_prio_max(&vfd->v4l2_dev->prio);
+ *p = v4l2_prio_max(vfd->prio);
return 0;
}
-static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
struct v4l2_fh *vfh;
u32 *p = arg;
- if (ops->vidioc_s_priority)
- return ops->vidioc_s_priority(file, fh, *p);
vfd = video_devdata(file);
- vfh = file->private_data;
- return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
+ vfh = file_to_v4l2_fh(file);
+ return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
-static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
@@ -996,11 +1214,19 @@ static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_IN_CAP_STD;
- return ops->vidioc_enum_input(file, fh, p);
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_INPUT_TYPE_CAMERA;
+ return 0;
+ }
+
+ return ops->vidioc_enum_input(file, NULL, p);
}
-static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_output *p = arg;
@@ -1014,238 +1240,738 @@ static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_OUT_CAP_STD;
- return ops->vidioc_enum_output(file, fh, p);
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_OUTPUT_TYPE_ANALOG;
+ return 0;
+ }
+
+ return ops->vidioc_enum_output(file, NULL, p);
+}
+
+static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
+{
+ const unsigned sz = sizeof(fmt->description);
+ const char *descr = NULL;
+ u32 flags = 0;
+
+ /*
+ * We depart from the normal coding style here since the descriptions
+ * should be aligned so it is easy to see which descriptions will be
+ * longer than 31 characters (the max length for a description).
+ * And frankly, this is easier to read anyway.
+ *
+ * Note that gcc will use O(log N) comparisons to find the right case.
+ */
+ switch (fmt->pixelformat) {
+ /* Max description length mask: descr = "0123456789012345678901234567890" */
+ case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break;
+ case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break;
+ case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break;
+ case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break;
+ case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break;
+ case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break;
+ case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break;
+ case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break;
+ case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break;
+ case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break;
+ case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break;
+ case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break;
+ case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break;
+ case V4L2_PIX_FMT_RGB48: descr = "48-bit RGB 16-16-16"; break;
+ case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break;
+ case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break;
+ case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break;
+ case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
+ case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
+ case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break;
+ case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break;
+ case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break;
+ case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y16I: descr = "Interleaved 16-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
+ case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break;
+ case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break;
+ case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
+ case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
+ case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
+ case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break;
+ case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break;
+ case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break;
+ case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break;
+ case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break;
+ case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
+ case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
+ case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
+ case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
+ case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
+ case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break;
+ case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
+ case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
+ case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
+ case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+ case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break;
+ case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break;
+ case V4L2_PIX_FMT_NV15: descr = "10-bit Y/UV 4:2:0 (Packed)"; break;
+ case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break;
+ case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break;
+ case V4L2_PIX_FMT_NV20: descr = "10-bit Y/UV 4:2:2 (Packed)"; break;
+ case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break;
+ case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break;
+ case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
+ case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break;
+ case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break;
+ case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break;
+ case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break;
+ case V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break;
+ case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break;
+ case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break;
+ case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break;
+ case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break;
+ case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break;
+ case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
+ case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
+ case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
+ case V4L2_PIX_FMT_RAW_CRU10: descr = "10-bit Raw CRU Packed"; break;
+ case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_RAW_CRU12: descr = "12-bit Raw CRU Packed"; break;
+ case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_RAW_CRU14: descr = "14-bit Raw CRU Packed"; break;
+ case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_RAW_CRU20: descr = "20-bit Raw CRU Packed"; break;
+ case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
+ case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
+ case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
+ case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break;
+ case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break;
+ case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
+ case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
+ case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
+ case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
+ case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
+ case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
+ case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break;
+ case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
+ case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break;
+ case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break;
+ case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break;
+ case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break;
+ case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break;
+ case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break;
+ case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break;
+ case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
+ case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
+ case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
+ case V4L2_META_FMT_UVC_MSXU_1_5: descr = "UVC MSXU Metadata"; break;
+ case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
+ case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
+ case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break;
+ case V4L2_META_FMT_C3ISP_PARAMS: descr = "Amlogic C3 ISP Parameters"; break;
+ case V4L2_META_FMT_C3ISP_STATS: descr = "Amlogic C3 ISP Statistics"; break;
+ case V4L2_META_FMT_MALI_C55_PARAMS: descr = "ARM Mali-C55 ISP Parameters"; break;
+ case V4L2_META_FMT_MALI_C55_STATS: descr = "ARM Mali-C55 ISP 3A Statistics"; break;
+ case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
+ case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
+ case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break;
+ case V4L2_META_FMT_RPI_BE_CFG: descr = "RPi PiSP BE Config format"; break;
+ case V4L2_META_FMT_RPI_FE_CFG: descr = "RPi PiSP FE Config format"; break;
+ case V4L2_META_FMT_RPI_FE_STATS: descr = "RPi PiSP FE Statistics format"; break;
+ case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break;
+ case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break;
+
+ default:
+ /* Compressed formats */
+ flags = V4L2_FMT_FLAG_COMPRESSED;
+ switch (fmt->pixelformat) {
+ /* Max description length mask: descr = "0123456789012345678901234567890" */
+ case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break;
+ case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break;
+ case V4L2_PIX_FMT_DV: descr = "1394"; break;
+ case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break;
+ case V4L2_PIX_FMT_H264: descr = "H.264"; break;
+ case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break;
+ case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break;
+ case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_H263: descr = "H.263"; break;
+ case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
+ case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
+ case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break;
+ case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
+ case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
+ case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break;
+ case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
+ case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
+ case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
+ case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break;
+ case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
+ case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
+ case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break;
+ case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break;
+ case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; break;
+ case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
+ case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
+ case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
+ case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break;
+ case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break;
+ case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break;
+ case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break;
+ case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break;
+ case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break;
+ case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break;
+ case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break;
+ case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break;
+ case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break;
+ case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break;
+ case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break;
+ case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break;
+ case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
+ case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
+ case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
+ case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break;
+ case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break;
+ case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break;
+ case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
+ case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break;
+ case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break;
+ case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break;
+ case V4L2_PIX_FMT_PISP_COMP1_RGGB: descr = "PiSP 8b RGRG/GBGB mode1 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP1_GRBG: descr = "PiSP 8b GRGR/BGBG mode1 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP1_GBRG: descr = "PiSP 8b GBGB/RGRG mode1 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP1_BGGR: descr = "PiSP 8b BGBG/GRGR mode1 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP1_MONO: descr = "PiSP 8b monochrome mode1 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP2_RGGB: descr = "PiSP 8b RGRG/GBGB mode2 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP2_GRBG: descr = "PiSP 8b GRGR/BGBG mode2 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP2_GBRG: descr = "PiSP 8b GBGB/RGRG mode2 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP2_BGGR: descr = "PiSP 8b BGBG/GRGR mode2 compr"; break;
+ case V4L2_PIX_FMT_PISP_COMP2_MONO: descr = "PiSP 8b monochrome mode2 compr"; break;
+ default:
+ if (fmt->description[0])
+ return;
+ WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
+ flags = 0;
+ snprintf(fmt->description, sz, "%p4cc",
+ &fmt->pixelformat);
+ break;
+ }
+ }
+
+ if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) {
+ switch (fmt->pixelformat) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED;
+ break;
+ default:
+ fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED;
+ }
+ }
+
+ if (descr)
+ WARN_ON(strscpy(fmt->description, descr, sz) < 0);
+ fmt->flags |= flags;
}
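For pixel formats missing from the table above, the default branch falls back to printing the raw fourcc through the kernel's %p4cc format specifier. A rough userspace approximation of that fallback, for illustration only (not part of this patch; the helper name is made up and the exact %p4cc output differs slightly):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Unpack a V4L2 fourcc into its four ASCII bytes, least significant first. */
static void fourcc_to_string(uint32_t fourcc, char buf[5])
{
	uint32_t code = fourcc & ~(1U << 31);	/* drop the big-endian flag bit */
	int i;

	for (i = 0; i < 4; i++) {
		int c = (code >> (8 * i)) & 0xff;

		buf[i] = isprint(c) ? c : '.';
	}
	buf[4] = '\0';
}

int main(void)
{
	uint32_t pixfmt = 0x56595559;	/* 'YUYV' */
	char name[5];

	fourcc_to_string(pixfmt, name);
	printf("unknown pixelformat %s (0x%08x)\n", name, pixfmt);
	return 0;
}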
-static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
- struct video_device *vfd = video_devdata(file);
- bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
- bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret = check_fmt(file, p->type);
+ u32 mbus_code;
+ u32 cap_mask;
+
+ if (ret)
+ return ret;
+ ret = -EINVAL;
+
+ if (!(vdev->device_caps & V4L2_CAP_IO_MC))
+ p->mbus_code = 0;
+
+ mbus_code = p->mbus_code;
+ memset_after(p, 0, type);
+ p->mbus_code = mbus_code;
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !ops->vidioc_enum_fmt_vid_cap))
- break;
- return ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (unlikely(!is_rx || !ops->vidioc_enum_fmt_vid_cap_mplane))
+ cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
+ break;
+
+ if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
- return ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_cap(file, NULL, arg);
+ break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (unlikely(!is_rx || !ops->vidioc_enum_fmt_vid_overlay))
+ if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
- return ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_overlay(file, NULL, arg);
+ break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (unlikely(!is_tx || !ops->vidioc_enum_fmt_vid_out))
- break;
- return ops->vidioc_enum_fmt_vid_out(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (unlikely(!is_tx || !ops->vidioc_enum_fmt_vid_out_mplane))
+ cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
break;
- return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
+
+ if (unlikely(!ops->vidioc_enum_fmt_vid_out))
+ break;
+ ret = ops->vidioc_enum_fmt_vid_out(file, NULL, arg);
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_sdr_cap(file, NULL, arg);
+ break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
+ break;
+ ret = ops->vidioc_enum_fmt_sdr_out(file, NULL, arg);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_cap(file, NULL, arg);
+ break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_meta_out))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_out(file, NULL, arg);
+ break;
}
- return -EINVAL;
+ if (ret == 0)
+ v4l_fill_fmtdesc(p);
+ return ret;
}
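With the description filled in by the core, drivers only need to report the pixelformat itself. A minimal userspace sketch of the resulting VIDIOC_ENUM_FMT behaviour, for illustration only (/dev/video0, the single-planar capture type and the little-endian fourcc print are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_fmtdesc fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* fmt.description and fmt.flags are filled by v4l_fill_fmtdesc(). */
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
		printf("%2u: %.4s  %s%s\n", fmt.index,
		       (const char *)&fmt.pixelformat, fmt.description,
		       (fmt.flags & V4L2_FMT_FLAG_COMPRESSED) ? " [compressed]" : "");
		fmt.index++;
	}

	close(fd);
	return 0;
}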
-static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
- bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
- bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ memset(&p->fmt, 0, sizeof(p->fmt));
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap))
+ if (unlikely(!ops->vidioc_g_fmt_vid_cap))
break;
- return ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_cap(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane))
- break;
- return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay))
- break;
- return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap))
- break;
- return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap))
- break;
- return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out))
+ if (unlikely(!ops->vidioc_g_fmt_vid_out))
break;
- return ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_out(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane))
- break;
- return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay))
- break;
- return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out))
- break;
- return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
- break;
- return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ return ops->vidioc_g_fmt_sdr_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return ops->vidioc_g_fmt_sdr_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ return ops->vidioc_g_fmt_meta_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return ops->vidioc_g_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
- bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
- bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret = check_fmt(file, p->type);
+ unsigned int i;
+
+ if (ret)
+ return ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ v4l_sanitize_format(p);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap))
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix);
- return ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_cap(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
- return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
+ if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
break;
- CLEAR_AFTER_FIELD(p, fmt.win);
- return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_s_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap))
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
- return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap))
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
- return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out))
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix);
- return ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_out(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
- return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
break;
- CLEAR_AFTER_FIELD(p, fmt.win);
- return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_s_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out))
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
- return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out))
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
- return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_s_fmt_meta_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_meta_out))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_s_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
- bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
- bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+ int ret = check_fmt(file, p->type);
+ unsigned int i;
+
+ if (ret)
+ return ret;
+
+ v4l_sanitize_format(p);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap))
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix);
- return ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_cap(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
- return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
+ if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
break;
- CLEAR_AFTER_FIELD(p, fmt.win);
- return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_try_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap))
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
- return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap))
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
- return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out))
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix);
- return ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_out(file, NULL, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
- return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
break;
- CLEAR_AFTER_FIELD(p, fmt.win);
- return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_try_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out))
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
- return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out))
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
- return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_out))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_out(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_meta_cap))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_try_fmt_meta_cap(file, NULL, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_meta_out))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_try_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamon(file, NULL, *(unsigned int *)arg);
}
-static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamoff(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
@@ -1253,117 +1979,129 @@ static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- err = ops->vidioc_g_tuner(file, fh, p);
+ err = ops->vidioc_g_tuner(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
-static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_s_tuner(file, fh, p);
+ return ops->vidioc_s_tuner(file, NULL, p);
}
static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
int err;
- err = ops->vidioc_g_modulator(file, fh, p);
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
+ err = ops->vidioc_g_modulator(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
+static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_modulator *p = arg;
+
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
+ return ops->vidioc_s_modulator(file, NULL, p);
+}
+
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_g_frequency(file, fh, p);
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ p->type = V4L2_TUNER_SDR;
+ else
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_g_frequency(file, NULL, p);
}
static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
+ int ret;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- if (p->type != type)
- return -EINVAL;
- return ops->vidioc_s_frequency(file, fh, p);
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ return ops->vidioc_s_frequency(file, NULL, p);
}
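On SDR nodes the handler now accepts only the V4L2_TUNER_SDR and V4L2_TUNER_RF tuner types. A minimal userspace sketch of tuning an SDR front end, for illustration only (the device path, tuner index and 1 Hz frequency units are assumptions; the units actually depend on the driver's V4L2_TUNER_CAP_1HZ capability):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

/* Tune an SDR RF front end; returns 0 on success, -1 on error. */
static int sdr_set_frequency(const char *dev, unsigned int tuner, unsigned int hz)
{
	struct v4l2_frequency f;
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&f, 0, sizeof(f));
	f.tuner = tuner;
	f.type = V4L2_TUNER_RF;		/* anything other than SDR/RF is rejected */
	f.frequency = hz;		/* assumes V4L2_TUNER_CAP_1HZ units */

	ret = ioctl(fd, VIDIOC_S_FREQUENCY, &f);
	close(fd);
	return ret;
}

int main(void)
{
	return sdr_set_frequency("/dev/swradio0", 1, 100000000) ? 1 : 0;
}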
-static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
- v4l2_std_id id = vfd->tvnorms, curr_id = 0;
- unsigned int index = p->index, i, j = 0;
- const char *descr = "";
-
- /* Return -ENODATA if the tvnorms for the current input
- or output is 0, meaning that it doesn't support this API. */
- if (id == 0)
- return -ENODATA;
-
- /* Return norm array in a canonical way */
- for (i = 0; i <= index && id; i++) {
- /* last std value in the standards array is 0, so this
- while always ends there since (id & 0) == 0. */
- while ((id & standards[j].std) != standards[j].std)
- j++;
- curr_id = standards[j].std;
- descr = standards[j].descr;
- j++;
- if (curr_id == 0)
- break;
- if (curr_id != V4L2_STD_PAL &&
- curr_id != V4L2_STD_SECAM &&
- curr_id != V4L2_STD_NTSC)
- id &= ~curr_id;
- }
- if (i <= index)
- return -EINVAL;
- v4l2_video_std_construct(p, curr_id, descr);
- return 0;
+ return v4l_video_std_enumstd(p, vfd->tvnorms);
}
-static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
norm = id & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
return -EINVAL;
/* Calls the specific handler */
- return ops->vidioc_s_std(file, fh, norm);
+ return ops->vidioc_s_std(file, NULL, norm);
}
-static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
/*
* If no signal is detected, then the driver should return
* V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
@@ -1373,91 +2111,143 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
* their efforts to improve the standards detection.
*/
*p = vfd->tvnorms;
- return ops->vidioc_querystd(file, fh, arg);
+ return ops->vidioc_querystd(file, NULL, arg);
}
static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ /* s_hw_freq_seek is not supported for SDR for now */
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ return -EINVAL;
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
return -EINVAL;
- return ops->vidioc_s_hw_freq_seek(file, fh, p);
+ return ops->vidioc_s_hw_freq_seek(file, NULL, p);
+}
+
+static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
+{
+ struct v4l2_framebuffer *p = arg;
+
+ p->base = NULL;
+ return ops->vidioc_s_fbuf(file, NULL, p);
}
-static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_overlay(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_overlay(file, NULL, *(unsigned int *)arg);
}
-static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_requestbuffers *p = arg;
int ret = check_fmt(file, p->type);
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, memory);
+ memset_after(p, 0, flags);
+
+ p->capabilities = 0;
+ if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
+ p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
- return ops->vidioc_reqbufs(file, fh, p);
+ return ops->vidioc_reqbufs(file, NULL, p);
}
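The capabilities field seeded here is how userspace discovers whether VIDIOC_REMOVE_BUFS is available. A minimal userspace sketch, for illustration only (the buffer type, memory mode and helper name are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Allocate capture buffers and report whether VIDIOC_REMOVE_BUFS is supported. */
static int request_buffers(int fd, unsigned int count, int *can_remove)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = count;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;

	if (ioctl(fd, VIDIOC_REQBUFS, &req))
		return -1;

	*can_remove = !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS);
	return req.count;	/* the driver may grant a different count */
}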
-static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+ return ret ? ret : ops->vidioc_querybuf(file, NULL, p);
}
-static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_qbuf(file, NULL, p);
}
-static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_dqbuf(file, NULL, p);
}
static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_create_buffers *create = arg;
int ret = check_fmt(file, create->format.type);
- return ret ? ret : ops->vidioc_create_bufs(file, fh, create);
+ if (ret)
+ return ret;
+
+ memset_after(create, 0, flags);
+
+ v4l_sanitize_format(&create->format);
+
+ create->capabilities = 0;
+ if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
+ create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
+
+ ret = ops->vidioc_create_bufs(file, NULL, create);
+
+ if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ return ret;
}
static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_buffer *b = arg;
int ret = check_fmt(file, b->type);
- return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+ return ret ? ret : ops->vidioc_prepare_buf(file, NULL, b);
+}
+
+static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *arg)
+{
+ struct v4l2_remove_buffers *remove = arg;
+
+ if (ops->vidioc_remove_bufs)
+ return ops->vidioc_remove_bufs(file, NULL, remove);
+
+ return -ENOTTY;
}
-static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
v4l2_std_id std;
int ret = check_fmt(file, p->type);
@@ -1465,67 +2255,105 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
if (ops->vidioc_g_parm)
- return ops->vidioc_g_parm(file, fh, p);
+ return ops->vidioc_g_parm(file, NULL, p);
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- p->parm.capture.readbuffers = 2;
- ret = ops->vidioc_g_std(file, fh, &std);
+ if (vfd->device_caps & V4L2_CAP_READWRITE)
+ p->parm.capture.readbuffers = 2;
+ ret = ops->vidioc_g_std(file, NULL, &std);
if (ret == 0)
v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
return ret;
}
-static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_streamparm *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_s_parm(file, fh, p);
+ if (ret)
+ return ret;
+
+ /* Note: extendedmode is never used in drivers */
+ if (V4L2_TYPE_IS_OUTPUT(p->type)) {
+ memset(p->parm.output.reserved, 0,
+ sizeof(p->parm.output.reserved));
+ p->parm.output.extendedmode = 0;
+ p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
+ } else {
+ memset(p->parm.capture.reserved, 0,
+ sizeof(p->parm.capture.reserved));
+ p->parm.capture.extendedmode = 0;
+ p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
+ }
+ return ops->vidioc_s_parm(file, NULL, p);
}
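v4l_s_parm now zeroes the reserved fields and extendedmode and masks the mode bits before calling the driver. A minimal userspace sketch of requesting a frame rate through this path, for illustration only (the single-planar type and 30 fps value are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request a nominal frame rate; the driver may adjust the returned value. */
static int set_frame_rate(int fd, unsigned int fps)
{
	struct v4l2_streamparm parm;

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = fps;

	return ioctl(fd, VIDIOC_S_PARM, &parm);
}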
-static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl qec = {};
struct v4l2_queryctrl *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_queryctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_queryctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_queryctrl)
- return ops->vidioc_queryctrl(file, fh, p);
+ if (!ops->vidioc_query_ext_ctrl)
+ return -ENOTTY;
+
+ /* Simulate VIDIOC_QUERYCTRL using the query_ext_ctrl op. */
+ qec.id = p->id;
+ ret = ops->vidioc_query_ext_ctrl(file, NULL, &qec);
+ if (ret)
+ return ret;
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec);
+ return ret;
+}
+
+static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl *p = arg;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_query_ext_ctrl)
+ return ops->vidioc_query_ext_ctrl(file, NULL, p);
return -ENOTTY;
}
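Legacy VIDIOC_QUERYCTRL callers are emulated through the handler above; new userspace can use the extended form directly. A minimal sketch, for illustration only (the helper name is made up):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Query the range of one control via the extended-controls interface. */
static int query_ctrl_range(int fd, unsigned int id,
			    long long *min, long long *max, long long *step)
{
	struct v4l2_query_ext_ctrl qec;

	memset(&qec, 0, sizeof(qec));
	qec.id = id;

	if (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qec))
		return -1;

	*min = qec.minimum;
	*max = qec.maximum;
	*step = qec.step;
	return 0;
}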
-static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_querymenu *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
if (vfh && vfh->ctrl_handler)
return v4l2_querymenu(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_querymenu(vfd->ctrl_handler, p);
if (ops->vidioc_querymenu)
- return ops->vidioc_querymenu(file, fh, p);
+ return ops->vidioc_querymenu(file, NULL, p);
return -ENOTTY;
}
-static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
@@ -1533,18 +2361,16 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return v4l2_g_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_g_ctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_g_ctrl)
- return ops->vidioc_g_ctrl(file, fh, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
- ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
ctrls.count = 1;
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1)) {
- int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+ if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
+ int ret = ops->vidioc_g_ext_ctrls(file, NULL, &ctrls);
if (ret == 0)
p->value = ctrl.value;
@@ -1553,112 +2379,159 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
- if (ops->vidioc_s_ctrl)
- return ops->vidioc_s_ctrl(file, fh, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
- ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
ctrls.count = 1;
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1))
- return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
- return -EINVAL;
+ if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
+ return -EINVAL;
+ ret = ops->vidioc_s_ext_ctrls(file, NULL, &ctrls);
+ p->value = ctrl.value;
+ return ret;
}
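The wrapper above emulates VIDIOC_S_CTRL by packing a single control into an extended-controls request; applications can do the same directly. A minimal userspace sketch, for illustration only (the control ID is a placeholder):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Set a single control through VIDIOC_S_EXT_CTRLS. */
static int set_one_ctrl(int fd, unsigned int id, int value)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));

	ctrl.id = id;				/* e.g. V4L2_CID_BRIGHTNESS */
	ctrl.value = value;

	ctrls.which = V4L2_CTRL_ID2WHICH(id);	/* same mapping as v4l_s_ctrl() above */
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}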
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
+ ops->vidioc_g_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
+ ops->vidioc_s_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
+ ops->vidioc_try_ext_ctrls(file, NULL, p) : -EINVAL;
+}
+
+/*
+ * The selection API specified originally that the _MPLANE buffer types
+ * shouldn't be used. The reasons for this are lost in the mists of time
+ * (or just really crappy memories). Regardless, this is really annoying
+ * for userspace. So to keep things simple we map _MPLANE buffer types
+ * to their 'regular' counterparts before calling the driver. And we
+ * restore it afterwards. This way applications can use either buffer
+ * type and drivers don't need to check for both.
+ */
+static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_g_selection(file, NULL, p);
+ p->type = old_type;
+ return ret;
+}
+
+static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_s_selection(file, NULL, p);
+ p->type = old_type;
+ return ret;
}
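
/*
 * Illustrative driver-side counterpart (editor sketch, not part of this
 * patch): thanks to the _MPLANE mapping above, a capture driver's
 * g_selection handler only needs to handle the single-planar buffer type.
 * The 'mydrv_*' names are hypothetical.
 */
struct mydrv_dev {
	struct v4l2_rect crop;
	unsigned int sensor_width;
	unsigned int sensor_height;
};

static int mydrv_g_selection(struct file *file, void *fh,
			     struct v4l2_selection *sel)
{
	struct mydrv_dev *mydrv = video_drvdata(file);

	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		sel->r = mydrv->crop;
		return 0;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = mydrv->sensor_width;
		sel->r.height = mydrv->sensor_height;
		return 0;
	default:
		return -EINVAL;
	}
}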
-static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
};
int ret;
- if (ops->vidioc_g_crop)
- return ops->vidioc_g_crop(file, fh, p);
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ s.target = V4L2_SEL_TGT_COMPOSE;
else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
/* copying results to old structure on success */
if (!ret)
@@ -1666,37 +2539,67 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
return ret;
}
-static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
.r = p->c,
};
- if (ops->vidioc_s_crop)
- return ops->vidioc_s_crop(file, fh, p);
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ s.target = V4L2_SEL_TGT_COMPOSE;
else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- return ops->vidioc_s_selection(file, fh, &s);
+ return v4l_s_selection(ops, file, &s);
}
-static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_cropcap *p = arg;
struct v4l2_selection s = { .type = p->type };
- int ret;
+ int ret = 0;
+
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
+
+ if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ /*
+ * The determine_valid_ioctls() call already should ensure
+ * that this can never happen, but just in case...
+ */
+ if (WARN_ON(!ops->vidioc_g_selection))
+ return -ENOTTY;
+
+ if (ops->vidioc_g_pixelaspect)
+ ret = ops->vidioc_g_pixelaspect(file, NULL, s.type,
+ &p->pixelaspect);
+
+ /*
+ * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
+ * square pixel aspect ratio in that case.
+ */
+ if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD)
+ return ret;
- if (ops->vidioc_cropcap)
- return ops->vidioc_cropcap(file, fh, p);
+ /* Use g_selection() to fill in the bounds and defrect rectangles */
/* obtaining bounds */
if (V4L2_TYPE_IS_OUTPUT(p->type))
@@ -1704,30 +2607,31 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_BOUNDS;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
+ V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
+
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->bounds = s.r;
/* obtaining defrect */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
+ if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS)
s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->defrect = s.r;
- /* setting trivial pixelaspect */
- p->pixelaspect.numerator = 1;
- p->pixelaspect.denominator = 1;
return 0;
}
-static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
@@ -1735,7 +2639,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
- ret = ops->vidioc_log_status(file, fh);
+ ret = ops->vidioc_log_status(file, NULL);
if (vfd->v4l2_dev)
pr_info("%s: ================== END STATUS ==================\n",
vfd->v4l2_dev->name);
@@ -1743,7 +2647,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
@@ -1763,7 +2667,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_g_register(file, fh, p);
+ return ops->vidioc_g_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -1771,7 +2675,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
const struct v4l2_dbg_register *p = arg;
@@ -1791,7 +2695,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_s_register(file, fh, p);
+ return ops->vidioc_s_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -1799,7 +2703,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vfd = video_devdata(file);
@@ -1813,9 +2717,9 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (ops->vidioc_g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
- strlcpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
+ strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
if (ops->vidioc_g_chip_info)
- return ops->vidioc_g_chip_info(file, fh, arg);
+ return ops->vidioc_g_chip_info(file, NULL, arg);
if (p->match.addr)
return -EINVAL;
return 0;
@@ -1830,7 +2734,7 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_WRITABLE;
if (sd->ops->core && sd->ops->core->g_register)
p->flags |= V4L2_CHIP_FL_READABLE;
- strlcpy(p->name, sd->name, sizeof(p->name));
+ strscpy(p->name, sd->name, sizeof(p->name));
return 0;
}
break;
@@ -1841,26 +2745,32 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
#endif
}
-static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
}
static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_subscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_subscribe_event(vfh, arg);
}
static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_unsubscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_unsubscribe_event(vfh, arg);
}
static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_sliced_vbi_cap *p = arg;
int ret = check_fmt(file, p->type);
@@ -1871,24 +2781,32 @@ static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
/* Clear up to type, everything after type is zeroed already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
- return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+ return ops->vidioc_g_sliced_vbi_cap(file, NULL, p);
}
static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency_band *p = arg;
enum v4l2_tuner_type type;
int err;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
-
- if (type != p->type)
- return -EINVAL;
- if (ops->vidioc_enum_freq_bands)
- return ops->vidioc_enum_freq_bands(file, fh, p);
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ type = p->type;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ if (ops->vidioc_enum_freq_bands) {
+ err = ops->vidioc_enum_freq_bands(file, NULL, p);
+ if (err != -ENOTTY)
+ return err;
+ }
if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
struct v4l2_tuner t = {
.index = p->tuner,
@@ -1897,7 +2815,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_tuner(file, fh, &t);
+ err = ops->vidioc_g_tuner(file, NULL, &t);
if (err)
return err;
p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
@@ -1916,14 +2834,13 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_modulator(file, fh, &m);
+ err = ops->vidioc_g_modulator(file, NULL, &m);
if (err)
return err;
p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p->rangelow = m.rangelow;
p->rangehigh = m.rangehigh;
- p->modulation = (type == V4L2_TUNER_RADIO) ?
- V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ p->modulation = V4L2_BAND_MODULATION_FM;
return 0;
}
return -ENOTTY;
@@ -1933,144 +2850,173 @@ struct v4l2_ioctl_info {
unsigned int ioctl;
u32 flags;
const char * const name;
- union {
- u32 offset;
- int (*func)(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *p);
- } u;
+ int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *p);
void (*debug)(const void *arg, bool write_only);
};
/* This control needs a priority check */
-#define INFO_FL_PRIO (1 << 0)
+#define INFO_FL_PRIO (1 << 0)
/* This control can be valid if the filehandle passes a control handler. */
-#define INFO_FL_CTRL (1 << 1)
-/* This is a standard ioctl, no need for special code */
-#define INFO_FL_STD (1 << 2)
-/* This is ioctl has its own function */
-#define INFO_FL_FUNC (1 << 3)
+#define INFO_FL_CTRL (1 << 1)
/* Queuing ioctl */
-#define INFO_FL_QUEUE (1 << 4)
+#define INFO_FL_QUEUE (1 << 2)
+/* Always copy back result, even on error */
+#define INFO_FL_ALWAYS_COPY (1 << 3)
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- sizeof(((struct v4l2_struct *)0)->field)) << 16)
-#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
-
-#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \
- [_IOC_NR(_ioctl)] = { \
- .ioctl = _ioctl, \
- .flags = _flags | INFO_FL_STD, \
- .name = #_ioctl, \
- .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc), \
- .debug = _debug, \
- }
-
-#define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags) \
- [_IOC_NR(_ioctl)] = { \
- .ioctl = _ioctl, \
- .flags = _flags | INFO_FL_FUNC, \
- .name = #_ioctl, \
- .u.func = _func, \
- .debug = _debug, \
- }
-
-static struct v4l2_ioctl_info v4l2_ioctls[] = {
- IOCTL_INFO_FNC(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
- IOCTL_INFO_FNC(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
- IOCTL_INFO_FNC(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, INFO_FL_CLEAR(v4l2_format, type)),
- IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
- IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
- IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0),
- IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
- IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
- IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
- IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
- IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
- IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
- IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_STD, vidioc_g_std, v4l_print_std, 0),
- IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
- IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
- IOCTL_INFO_FNC(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
- IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
- IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0),
- IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
- IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
- IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
- IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
- IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
- IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
- IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
- IOCTL_INFO_STD(VIDIOC_S_MODULATOR, vidioc_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
- IOCTL_INFO_FNC(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
- IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
- IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, 0),
- IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
- IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
- IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
- IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
- IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
- IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
- IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
- IOCTL_INFO_FNC(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
- IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
- IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
- IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0),
- IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
- IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
- IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
- IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
- IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
- IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
- IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
- IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
- IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
- IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
- IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
- IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 0),
- IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
- IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
- IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
- IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
+ sizeof_field(struct v4l2_struct, field)) << 16)
+#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
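
/*
 * Worked example (editorial note, not part of the patch): for the
 * VIDIOC_ENUMSTD entry below, INFO_FL_CLEAR(v4l2_standard, index) stores
 * offsetof(struct v4l2_standard, index) + sizeof_field(struct
 * v4l2_standard, index) in the upper 16 bits of .flags; video_get_user()
 * later copies only that many leading bytes from userspace and zeroes the
 * rest of the structure before the handler runs.
 */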
+
+#define DEFINE_V4L_STUB_FUNC(_vidioc) \
+ static int v4l_stub_ ## _vidioc( \
+ const struct v4l2_ioctl_ops *ops, \
+ struct file *file, void *p) \
+ { \
+ return ops->vidioc_ ## _vidioc(file, NULL, p); \
+ }
+
+#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags, \
+ .name = #_ioctl, \
+ .func = _func, \
+ .debug = _debug, \
+ }
+
+DEFINE_V4L_STUB_FUNC(g_fbuf)
+DEFINE_V4L_STUB_FUNC(expbuf)
+DEFINE_V4L_STUB_FUNC(g_std)
+DEFINE_V4L_STUB_FUNC(g_audio)
+DEFINE_V4L_STUB_FUNC(s_audio)
+DEFINE_V4L_STUB_FUNC(g_edid)
+DEFINE_V4L_STUB_FUNC(s_edid)
+DEFINE_V4L_STUB_FUNC(g_audout)
+DEFINE_V4L_STUB_FUNC(s_audout)
+DEFINE_V4L_STUB_FUNC(g_jpegcomp)
+DEFINE_V4L_STUB_FUNC(s_jpegcomp)
+DEFINE_V4L_STUB_FUNC(enumaudio)
+DEFINE_V4L_STUB_FUNC(enumaudout)
+DEFINE_V4L_STUB_FUNC(enum_framesizes)
+DEFINE_V4L_STUB_FUNC(enum_frameintervals)
+DEFINE_V4L_STUB_FUNC(g_enc_index)
+DEFINE_V4L_STUB_FUNC(encoder_cmd)
+DEFINE_V4L_STUB_FUNC(try_encoder_cmd)
+DEFINE_V4L_STUB_FUNC(decoder_cmd)
+DEFINE_V4L_STUB_FUNC(try_decoder_cmd)
+DEFINE_V4L_STUB_FUNC(s_dv_timings)
+DEFINE_V4L_STUB_FUNC(g_dv_timings)
+DEFINE_V4L_STUB_FUNC(enum_dv_timings)
+DEFINE_V4L_STUB_FUNC(query_dv_timings)
+DEFINE_V4L_STUB_FUNC(dv_timings_cap)
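
/*
 * For reference (editorial note), DEFINE_V4L_STUB_FUNC(g_std) above
 * expands to roughly:
 *
 *	static int v4l_stub_g_std(const struct v4l2_ioctl_ops *ops,
 *				  struct file *file, void *p)
 *	{
 *		return ops->vidioc_g_std(file, NULL, p);
 *	}
 */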
+
+static const struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0),
+ IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
+ IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
+ IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0),
+ IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
+ IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
+ IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0),
+ IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
+ IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
+ IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
+ IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
+ IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0),
+ IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
+ IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
+ IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
+ IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0),
+ IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
+ IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
+ IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
+ IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
+ IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0),
+ IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
+ IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
+ IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
+ IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
+ IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
+ IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
+ IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0),
+ IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
+ IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
+ IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
+ IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)),
+ IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
+ IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
+ IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
-bool v4l2_is_known_ioctl(unsigned int cmd)
+static bool v4l2_is_known_ioctl(unsigned int cmd)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return false;
return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
}
-struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd)
+static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
+ struct v4l2_fh *vfh, unsigned int cmd,
+ void *arg)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return vdev->lock;
- if (test_bit(_IOC_NR(cmd), vdev->disable_locking))
- return NULL;
+ if (vfh && vfh->m2m_ctx &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
+ if (vfh->m2m_ctx->q_lock)
+ return vfh->m2m_ctx->q_lock;
+ }
if (vdev->queue && vdev->queue->lock &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
return vdev->queue->lock;
@@ -2091,7 +3037,7 @@ void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
type = "v4l2_int";
break;
case 'V':
- if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
+ if (!v4l2_is_known_ioctl(cmd)) {
type = "v4l2";
break;
}
@@ -2118,14 +3064,14 @@ static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct mutex *req_queue_lock = NULL;
+ struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
struct v4l2_ioctl_info default_info;
const struct v4l2_ioctl_info *info;
- void *fh = file->private_data;
- struct v4l2_fh *vfh = NULL;
- int use_fh_prio = 0;
- int debug = vfd->debug;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+ int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
if (ops == NULL) {
@@ -2134,19 +3080,41 @@ static long __video_do_ioctl(struct file *file,
return ret;
}
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- vfh = file->private_data;
- use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+ /*
+ * We need to serialize streamon/off with queueing new requests.
+ * These ioctls may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
+ (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
+ req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
+
+ if (mutex_lock_interruptible(req_queue_lock))
+ return -ERESTARTSYS;
+ }
+
+ lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
+
+ if (lock && mutex_lock_interruptible(lock)) {
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
+ return -ERESTARTSYS;
+ }
+
+ if (!video_is_registered(vfd)) {
+ ret = -ENODEV;
+ goto unlock;
}
if (v4l2_is_known_ioctl(cmd)) {
info = &v4l2_ioctls[_IOC_NR(cmd)];
- if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
- !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ if (!is_valid_ioctl(vfd, cmd) &&
+ !((info->flags & INFO_FL_CTRL) && vfh->ctrl_handler))
goto done;
- if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
+ if (info->flags & INFO_FL_PRIO) {
ret = v4l2_prio_check(vfd->prio, vfh->prio);
if (ret)
goto done;
@@ -2159,28 +3127,26 @@ static long __video_do_ioctl(struct file *file,
}
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
- if (info->flags & INFO_FL_STD) {
- typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
- const void *p = vfd->ioctl_ops;
- const vidioc_op *vidioc = p + info->u.offset;
-
- ret = (*vidioc)(file, fh, arg);
- } else if (info->flags & INFO_FL_FUNC) {
- ret = info->u.func(ops, file, fh, arg);
+ if (info != &default_info) {
+ ret = info->func(ops, file, arg);
} else if (!ops->vidioc_default) {
ret = -ENOTTY;
} else {
- ret = ops->vidioc_default(file, fh,
- use_fh_prio ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ ret = ops->vidioc_default(file, NULL,
+ v4l2_prio_check(vfd->prio, vfh->prio) >= 0,
cmd, arg);
}
done:
- if (debug) {
+ if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
+ if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
+ (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
+ goto unlock;
+
v4l_printk_ioctl(video_device_node_name(vfd), cmd);
if (ret < 0)
pr_cont(": error %ld", ret);
- if (debug == V4L2_DEBUG_IOCTL)
+ if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
pr_cont("\n");
else if (_IOC_DIR(cmd) == _IOC_NONE)
info->debug(arg, write_only);
@@ -2190,11 +3156,16 @@ done:
}
}
+unlock:
+ if (lock)
+ mutex_unlock(lock);
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return ret;
}
static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
- void * __user *user_ptr, void ***kernel_ptr)
+ void __user **user_ptr, void ***kernel_ptr)
{
int ret = 0;
@@ -2211,16 +3182,16 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)buf->m.planes;
- *kernel_ptr = (void *)&buf->m.planes;
+ *kernel_ptr = (void **)&buf->m.planes;
*array_size = sizeof(struct v4l2_plane) * buf->length;
ret = 1;
}
break;
}
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID: {
- struct v4l2_subdev_edid *edid = parg;
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID: {
+ struct v4l2_edid *edid = parg;
if (edid->blocks) {
if (edid->blocks > 256) {
@@ -2228,7 +3199,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)edid->edid;
- *kernel_ptr = (void *)&edid->edid;
+ *kernel_ptr = (void **)&edid->edid;
*array_size = edid->blocks * 128;
ret = 1;
}
@@ -2246,70 +3217,234 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)ctrls->controls;
- *kernel_ptr = (void *)&ctrls->controls;
+ *kernel_ptr = (void **)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
* ctrls->count;
ret = 1;
}
break;
}
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = parg;
+
+ if (routing->len_routes > 256)
+ return -E2BIG;
+
+ *user_ptr = u64_to_user_ptr(routing->routes);
+ *kernel_ptr = (void **)&routing->routes;
+ *array_size = sizeof(struct v4l2_subdev_route)
+ * routing->len_routes;
+ ret = 1;
+ break;
+ }
}
return ret;
}
+unsigned int v4l2_translate_cmd(unsigned int cmd)
+{
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_DQEVENT_TIME32:
+ return VIDIOC_DQEVENT;
+ case VIDIOC_QUERYBUF_TIME32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF_TIME32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF_TIME32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_PREPARE_BUF_TIME32:
+ return VIDIOC_PREPARE_BUF;
+ }
+#endif
+ if (in_compat_syscall())
+ return v4l2_compat_translate_cmd(cmd);
+
+ return cmd;
+}
+EXPORT_SYMBOL_GPL(v4l2_translate_cmd);
+
+static int video_get_user(void __user *arg, void *parg,
+ unsigned int real_cmd, unsigned int cmd,
+ bool *always_copy)
+{
+ unsigned int n = _IOC_SIZE(real_cmd);
+ int err = 0;
+
+ if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
+ /* read-only ioctl */
+ memset(parg, 0, n);
+ return 0;
+ }
+
+ /*
+ * In some cases, only a few fields are used as input,
+ * i.e. when the app sets "index" and then the driver
+ * fills in the rest of the structure for the thing
+ * with that index. We only need to copy up the first
+ * non-input field.
+ */
+ if (v4l2_is_known_ioctl(real_cmd)) {
+ u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags;
+
+ if (flags & INFO_FL_CLEAR_MASK)
+ n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ *always_copy = flags & INFO_FL_ALWAYS_COPY;
+ }
+
+ if (cmd == real_cmd) {
+ if (copy_from_user(parg, (void __user *)arg, n))
+ err = -EFAULT;
+ } else if (in_compat_syscall()) {
+ memset(parg, 0, n);
+ err = v4l2_compat_get_user(arg, parg, cmd);
+ } else {
+ memset(parg, 0, n);
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer_time32 vb32;
+ struct v4l2_buffer *vb = parg;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.userptr = vb32.m.userptr,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+ break;
+ }
+ }
+#endif
+ }
+
+ /* zero out anything we don't copy from userspace */
+ if (!err && n < _IOC_SIZE(real_cmd))
+ memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n);
+ return err;
+}
+
+static int video_put_user(void __user *arg, void *parg,
+ unsigned int real_cmd, unsigned int cmd)
+{
+ if (!(_IOC_DIR(cmd) & _IOC_READ))
+ return 0;
+
+ if (cmd == real_cmd) {
+ /* Copy results into user buffer */
+ if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ return 0;
+ }
+
+ if (in_compat_syscall())
+ return v4l2_compat_put_user(arg, parg, cmd);
+
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_DQEVENT_TIME32: {
+ struct v4l2_event *ev = parg;
+ struct v4l2_event_time32 ev32;
+
+ memset(&ev32, 0, sizeof(ev32));
+
+ ev32.type = ev->type;
+ ev32.pending = ev->pending;
+ ev32.sequence = ev->sequence;
+ ev32.timestamp.tv_sec = ev->timestamp.tv_sec;
+ ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec;
+ ev32.id = ev->id;
+
+ memcpy(&ev32.u, &ev->u, sizeof(ev->u));
+ memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
+
+ if (copy_to_user(arg, &ev32, sizeof(ev32)))
+ return -EFAULT;
+ break;
+ }
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer *vb = parg;
+ struct v4l2_buffer_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+
+ vb32.index = vb->index;
+ vb32.type = vb->type;
+ vb32.bytesused = vb->bytesused;
+ vb32.flags = vb->flags;
+ vb32.field = vb->field;
+ vb32.timestamp.tv_sec = vb->timestamp.tv_sec;
+ vb32.timestamp.tv_usec = vb->timestamp.tv_usec;
+ vb32.timecode = vb->timecode;
+ vb32.sequence = vb->sequence;
+ vb32.memory = vb->memory;
+ vb32.m.userptr = vb->m.userptr;
+ vb32.length = vb->length;
+ vb32.request_fd = vb->request_fd;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+ break;
+ }
+ }
+#endif
+
+ return 0;
+}
+
long
-video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
- void *mbuf = NULL;
+ void *mbuf = NULL, *array_buf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
+ bool always_copy = false;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
+ unsigned int cmd = v4l2_translate_cmd(orig_cmd);
+ const size_t ioc_size = _IOC_SIZE(cmd);
/* Copy arguments into temp kernel buffer */
if (_IOC_DIR(cmd) != _IOC_NONE) {
- if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
+ if (ioc_size <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
- mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ mbuf = kmalloc(ioc_size, GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
- err = -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- unsigned int n = _IOC_SIZE(cmd);
-
- /*
- * In some cases, only a few fields are used as input,
- * i.e. when the app sets "index" and then the driver
- * fills in the rest of the structure for the thing
- * with that index. We only need to copy up the first
- * non-input field.
- */
- if (v4l2_is_known_ioctl(cmd)) {
- u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
- if (flags & INFO_FL_CLEAR_MASK)
- n = (flags & INFO_FL_CLEAR_MASK) >> 16;
- }
-
- if (copy_from_user(parg, (void __user *)arg, n))
- goto out;
-
- /* zero out anything we don't copy from userspace */
- if (n < _IOC_SIZE(cmd))
- memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
- } else {
- /* read-only ioctl */
- memset(parg, 0, _IOC_SIZE(cmd));
- }
+ err = video_get_user((void __user *)arg, parg, cmd,
+ orig_cmd, &always_copy);
+ if (err)
+ goto out;
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
@@ -2318,53 +3453,76 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
has_array_args = err;
if (has_array_args) {
- /*
- * When adding new types of array args, make sure that the
- * parent argument to ioctl (which contains the pointer to the
- * array) fits into sbuf (so that mbuf will still remain
- * unused up to here).
- */
- mbuf = kmalloc(array_size, GFP_KERNEL);
+ array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
- if (NULL == mbuf)
- goto out_array_args;
- err = -EFAULT;
- if (copy_from_user(mbuf, user_ptr, array_size))
- goto out_array_args;
- *kernel_ptr = mbuf;
+ if (array_buf == NULL)
+ goto out;
+ if (in_compat_syscall())
+ err = v4l2_compat_get_array_args(file, array_buf,
+ user_ptr, array_size,
+ orig_cmd, parg);
+ else
+ err = copy_from_user(array_buf, user_ptr, array_size) ?
+ -EFAULT : 0;
+ if (err)
+ goto out;
+ *kernel_ptr = array_buf;
}
/* Handles IOCTL */
err = func(file, cmd, parg);
- if (err == -ENOIOCTLCMD)
+ if (err == -ENOTTY || err == -ENOIOCTLCMD) {
err = -ENOTTY;
+ goto out;
+ }
- if (has_array_args) {
- *kernel_ptr = user_ptr;
- if (copy_to_user(user_ptr, mbuf, array_size))
- err = -EFAULT;
- goto out_array_args;
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
- /* VIDIOC_QUERY_DV_TIMINGS can return an error, but still have valid
- results that must be returned. */
- if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
+
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ *
+ * FIXME: subdev IOCTLS are partially handled here and partially in
+ * v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS
+ * defined here as part of the 'v4l2_ioctls' array. As
+ * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications
+ * even in case of failure, but it is not defined here as part of the
+ * 'v4l2_ioctls' array, insert an ad-hoc check to address that.
+ */
+ if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING)
+ always_copy = true;
+
+ if (err < 0 && !always_copy)
goto out;
-out_array_args:
- /* Copy results into user buffer */
- switch (_IOC_DIR(cmd)) {
- case _IOC_READ:
- case (_IOC_WRITE | _IOC_READ):
- if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
+ if (has_array_args) {
+ *kernel_ptr = (void __force *)user_ptr;
+ if (in_compat_syscall()) {
+ int put_err;
+
+ put_err = v4l2_compat_put_array_args(file, user_ptr,
+ array_buf,
+ array_size,
+ orig_cmd, parg);
+ if (put_err)
+ err = put_err;
+ } else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
- break;
+ }
}
+ if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
+ err = -EFAULT;
out:
+ kvfree(array_buf);
kfree(mbuf);
return err;
}
-EXPORT_SYMBOL(video_usercopy);
long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
diff --git a/drivers/media/v4l2-core/v4l2-isp.c b/drivers/media/v4l2-core/v4l2-isp.c
new file mode 100644
index 000000000000..29831f7032e9
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-isp.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Video4Linux2 generic ISP parameters and statistics support
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Author: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
+ */
+
+#include <media/v4l2-isp.h>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+
+#include <media/videobuf2-core.h>
+
+int v4l2_isp_params_validate_buffer_size(struct device *dev,
+ struct vb2_buffer *vb,
+ size_t max_size)
+{
+ size_t header_size = offsetof(struct v4l2_isp_params_buffer, data);
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+
+ /* Payload size can't be greater than the destination buffer size */
+ if (payload_size > max_size) {
+ dev_dbg(dev, "Payload size is too large: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ /* Payload size can't be smaller than the header size */
+ if (payload_size < header_size) {
+ dev_dbg(dev, "Payload size is too small: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_isp_params_validate_buffer_size);
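
/*
 * Illustrative driver usage (editor sketch, not part of this patch): the
 * size check could be run from a vb2 .buf_prepare callback on the
 * parameters queue. The 'mydrv_*' names and the maximum size are
 * hypothetical.
 */
#define MYDRV_PARAMS_MAX_SIZE	4096

struct mydrv_device {
	struct device *dev;
};

static int mydrv_params_buf_prepare(struct vb2_buffer *vb)
{
	struct mydrv_device *mydrv = vb2_get_drv_priv(vb->vb2_queue);

	return v4l2_isp_params_validate_buffer_size(mydrv->dev, vb,
						    MYDRV_PARAMS_MAX_SIZE);
}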
+
+int v4l2_isp_params_validate_buffer(struct device *dev, struct vb2_buffer *vb,
+ const struct v4l2_isp_params_buffer *buffer,
+ const struct v4l2_isp_params_block_type_info *type_info,
+ size_t num_block_types)
+{
+ size_t header_size = offsetof(struct v4l2_isp_params_buffer, data);
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+ size_t block_offset = 0;
+ size_t buffer_size;
+
+ /*
+ * Currently only the first version of the V4L2 ISP parameters format is
+ * supported. We accept both V0 and V1 to support existing drivers
+ * compatible with V4L2 ISP that use either 0 or 1 as their "first
+ * version" identifiers.
+ */
+ if (buffer->version != V4L2_ISP_PARAMS_VERSION_V0 &&
+ buffer->version != V4L2_ISP_PARAMS_VERSION_V1) {
+ dev_dbg(dev,
+ "Unsupported V4L2 ISP parameters format version: %u\n",
+ buffer->version);
+ return -EINVAL;
+ }
+
+ /* Validate the size reported in the header */
+ buffer_size = header_size + buffer->data_size;
+ if (buffer_size != payload_size) {
+ dev_dbg(dev, "Data size %zu and payload size %zu are different\n",
+ buffer_size, payload_size);
+ return -EINVAL;
+ }
+
+ /* Walk the list of ISP configuration blocks and validate them. */
+ buffer_size = buffer->data_size;
+ while (buffer_size >= sizeof(struct v4l2_isp_params_block_header)) {
+ const struct v4l2_isp_params_block_type_info *info;
+ const struct v4l2_isp_params_block_header *block;
+
+ block = (const struct v4l2_isp_params_block_header *)
+ (buffer->data + block_offset);
+
+ if (block->type >= num_block_types) {
+ dev_dbg(dev,
+ "Invalid block type %u at offset %zu\n",
+ block->type, block_offset);
+ return -EINVAL;
+ }
+
+ if (block->size > buffer_size) {
+ dev_dbg(dev, "Premature end of parameters data\n");
+ return -EINVAL;
+ }
+
+ /* It's invalid to specify both ENABLE and DISABLE. */
+ if ((block->flags & (V4L2_ISP_PARAMS_FL_BLOCK_ENABLE |
+ V4L2_ISP_PARAMS_FL_BLOCK_DISABLE)) ==
+ (V4L2_ISP_PARAMS_FL_BLOCK_ENABLE |
+ V4L2_ISP_PARAMS_FL_BLOCK_DISABLE)) {
+ dev_dbg(dev, "Invalid block flags %x at offset %zu\n",
+ block->flags, block_offset);
+ return -EINVAL;
+ }
+
+		/*
+		 * Match the block's reported size against the size provided
+		 * in the type info, but allow a block that contains only the
+		 * header if it is going to be disabled.
+		 */
+ info = &type_info[block->type];
+ if (block->size != info->size &&
+ (!(block->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) ||
+ block->size != sizeof(*block))) {
+ dev_dbg(dev,
+ "Invalid block size %u (expected %zu) at offset %zu\n",
+ block->size, info->size, block_offset);
+ return -EINVAL;
+ }
+
+ block_offset += block->size;
+ buffer_size -= block->size;
+ }
+
+ if (buffer_size) {
+ dev_dbg(dev, "Unexpected data after the parameters buffer end\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_isp_params_validate_buffer);
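
/*
 * Buffer layout implied by the validation above (editorial note): the
 * fixed header (everything up to the 'data' member, i.e. 'version' and
 * 'data_size') is followed by 'data_size' bytes of back-to-back blocks.
 * Each block starts with a v4l2_isp_params_block_header carrying 'type',
 * 'flags' and 'size', where 'size' covers the header plus its payload, so
 * consecutive blocks are found by advancing 'size' bytes at a time.
 */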
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jacopo Mondi <jacopo.mondi@ideasonboard.com");
+MODULE_DESCRIPTION("V4L2 generic ISP parameters and statistics helpers");
diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
new file mode 100644
index 000000000000..36a0f1a1b0d9
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-jpeg.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 JPEG header parser helpers.
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
+ *
+ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ */
+
+#include <linux/unaligned.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <media/v4l2-jpeg.h>
+
+MODULE_DESCRIPTION("V4L2 JPEG header parser helpers");
+MODULE_AUTHOR("Philipp Zabel <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+
+/* Table B.1 - Marker code assignments */
+#define SOF0 0xffc0 /* start of frame */
+#define SOF1 0xffc1
+#define SOF2 0xffc2
+#define SOF3 0xffc3
+#define SOF5 0xffc5
+#define SOF7 0xffc7
+#define JPG 0xffc8 /* extensions */
+#define SOF9 0xffc9
+#define SOF11 0xffcb
+#define SOF13 0xffcd
+#define SOF15 0xffcf
+#define DHT 0xffc4 /* huffman table */
+#define DAC 0xffcc /* arithmetic coding conditioning */
+#define RST0 0xffd0 /* restart */
+#define RST7 0xffd7
+#define SOI 0xffd8 /* start of image */
+#define EOI 0xffd9 /* end of image */
+#define SOS 0xffda /* start of stream */
+#define DQT 0xffdb /* quantization table */
+#define DNL 0xffdc /* number of lines */
+#define DRI 0xffdd /* restart interval */
+#define DHP 0xffde /* hierarchical progression */
+#define EXP 0xffdf /* expand reference */
+#define APP0 0xffe0 /* application data */
+#define APP14 0xffee /* application data for colour encoding */
+#define APP15 0xffef
+#define JPG0 0xfff0 /* extensions */
+#define JPG13 0xfffd
+#define COM 0xfffe /* comment */
+#define TEM 0xff01 /* temporary */
+
+/* Luma and chroma qp tables to achieve 50% compression quality
+ * This is as per example in Annex K.1 of ITU-T.81
+ */
+const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = {
+ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_qt);
+
+const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK] = {
+ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_qt);
+
+/* Zigzag scan pattern indexes */
+const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_zigzag_scan_index);
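
/*
 * Illustrative use (editorial note): v4l2_jpeg_zigzag_scan_index[i] is the
 * row-major index of the i-th coefficient in zigzag scan order, so a
 * row-major 8x8 quantization table can be reordered for transmission in a
 * DQT segment with:
 *
 *	for (i = 0; i < V4L2_JPEG_PIXELS_IN_BLOCK; i++)
 *		zigzag_qt[i] = natural_qt[v4l2_jpeg_zigzag_scan_index[i]];
 */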
+
+/*
+ * Contains the data that needs to be sent in the marker segment of an
+ * interchange format JPEG stream or an abbreviated format table specification
+ * data stream. Specifies the huffman table used for encoding the luminance DC
+ * coefficient differences. The table represents Table K.3 of ITU-T.81
+ */
+const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_dc_ht);
+
+/*
+ * Contains the data that needs to be sent in the marker segment of an
+ * interchange format JPEG stream or an abbreviated format table specification
+ * data stream. Specifies the huffman table used for encoding the luminance AC
+ * coefficients. The table represents Table K.5 of ITU-T.81
+ */
+const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04,
+ 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32,
+ 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A,
+ 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2,
+ 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8,
+ 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA,
+ 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_luma_ac_ht);
+
+/*
+ * Contains the data that needs to be sent in the marker segment of an interchange format JPEG
+ * stream or an abbreviated format table specification data stream.
+ * Specifies the huffman table used for encoding the chrominance DC coefficient differences.
+ * The table represents Table K.4 of ITU-T.81
+ */
+const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_dc_ht);
+
+/*
+ * Contains the data that needs to be sent in the marker segment of an
+ * interchange format JPEG stream or an abbreviated format table specification
+ * data stream. Specifies the huffman table used for encoding the chrominance
+ * AC coefficients. The table represents Table K.6 of ITU-T.81
+ */
+const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04,
+ 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81,
+ 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0,
+ 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17,
+ 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54,
+ 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9,
+ 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3,
+ 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9,
+ 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA
+};
+EXPORT_SYMBOL_GPL(v4l2_jpeg_ref_table_chroma_ac_ht);
+
+/**
+ * struct jpeg_stream - JPEG byte stream
+ * @curr: current position in stream
+ * @end: end position, after last byte
+ */
+struct jpeg_stream {
+ u8 *curr;
+ u8 *end;
+};
+
+/* returns a value that fits into u8, or negative error */
+static int jpeg_get_byte(struct jpeg_stream *stream)
+{
+ if (stream->curr >= stream->end)
+ return -EINVAL;
+
+ return *stream->curr++;
+}
+
+/* returns a value that fits into u16, or negative error */
+static int jpeg_get_word_be(struct jpeg_stream *stream)
+{
+ u16 word;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ word = get_unaligned_be16(stream->curr);
+ stream->curr += sizeof(__be16);
+
+ return word;
+}
+
+static int jpeg_skip(struct jpeg_stream *stream, size_t len)
+{
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ stream->curr += len;
+
+ return 0;
+}
+
+static int jpeg_next_marker(struct jpeg_stream *stream)
+{
+ int byte;
+ u16 marker = 0;
+
+ while ((byte = jpeg_get_byte(stream)) >= 0) {
+ marker = (marker << 8) | byte;
+ /* skip stuffing bytes and REServed markers */
+ if (marker == TEM || (marker > 0xffbf && marker < 0xffff))
+ return marker;
+ }
+
+ return byte;
+}
+
+/* this does not advance the current position in the stream */
+static int jpeg_reference_segment(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *segment)
+{
+ u16 len;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ len = get_unaligned_be16(stream->curr);
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ segment->start = stream->curr;
+ segment->length = len;
+
+ return 0;
+}
+
+static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v)
+{
+ if (nf == 1)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+
+ /* no chroma subsampling for 4-component images */
+ if (nf == 4 && h_v != 0x11)
+ return -EINVAL;
+
+ switch (h_v) {
+ case 0x11:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ case 0x21:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ case 0x22:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ case 0x41:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker,
+ struct v4l2_jpeg_frame_header *frame_header)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lf = 8 + 3 * Nf, Nf >= 1 */
+ if (len < 8 + 3)
+ return -EINVAL;
+
+ if (frame_header) {
+ /* Table B.2 - Frame header parameter sizes and values */
+ int p, y, x, nf;
+ int i;
+
+ p = jpeg_get_byte(stream);
+ if (p < 0)
+ return p;
+ /*
+ * Baseline DCT only supports 8-bit precision.
+ * Extended sequential DCT also supports 12-bit precision.
+ */
+ if (p != 8 && (p != 12 || sof_marker != SOF1))
+ return -EINVAL;
+
+ y = jpeg_get_word_be(stream);
+ if (y < 0)
+ return y;
+ if (y == 0)
+ return -EINVAL;
+
+ x = jpeg_get_word_be(stream);
+ if (x < 0)
+ return x;
+ if (x == 0)
+ return -EINVAL;
+
+ nf = jpeg_get_byte(stream);
+ if (nf < 0)
+ return nf;
+ /*
+ * The spec allows 1 <= Nf <= 255, but we only support up to 4
+ * components.
+ */
+ if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS)
+ return -EINVAL;
+ if (len != 8 + 3 * nf)
+ return -EINVAL;
+
+ frame_header->precision = p;
+ frame_header->height = y;
+ frame_header->width = x;
+ frame_header->num_components = nf;
+
+ for (i = 0; i < nf; i++) {
+ struct v4l2_jpeg_frame_component_spec *component;
+ int c, h_v, tq;
+
+ c = jpeg_get_byte(stream);
+ if (c < 0)
+ return c;
+
+ h_v = jpeg_get_byte(stream);
+ if (h_v < 0)
+ return h_v;
+ if (i == 0) {
+ int subs;
+
+ subs = v4l2_jpeg_decode_subsampling(nf, h_v);
+ if (subs < 0)
+ return subs;
+ frame_header->subsampling = subs;
+ } else if (h_v != 0x11) {
+ /* all chroma sampling factors must be 1 */
+ return -EINVAL;
+ }
+
+ tq = jpeg_get_byte(stream);
+ if (tq < 0)
+ return tq;
+
+ component = &frame_header->component[i];
+ component->component_identifier = c;
+ component->horizontal_sampling_factor =
+ (h_v >> 4) & 0xf;
+ component->vertical_sampling_factor = h_v & 0xf;
+ component->quantization_table_selector = tq;
+ }
+ } else {
+ return jpeg_skip(stream, len - 2);
+ }
+
+ return 0;
+}
+
+static int jpeg_parse_scan_header(struct jpeg_stream *stream,
+ struct v4l2_jpeg_scan_header *scan_header)
+{
+ size_t skip;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+	/* Ls = 6 + 2 * Ns, Ns >= 1 */
+ if (len < 6 + 2)
+ return -EINVAL;
+
+ if (scan_header) {
+ int ns;
+ int i;
+
+ ns = jpeg_get_byte(stream);
+ if (ns < 0)
+ return ns;
+ if (ns < 1 || ns > 4 || len != 6 + 2 * ns)
+ return -EINVAL;
+
+ scan_header->num_components = ns;
+
+ for (i = 0; i < ns; i++) {
+ struct v4l2_jpeg_scan_component_spec *component;
+ int cs, td_ta;
+
+ cs = jpeg_get_byte(stream);
+ if (cs < 0)
+ return cs;
+
+ td_ta = jpeg_get_byte(stream);
+ if (td_ta < 0)
+ return td_ta;
+
+ component = &scan_header->component[i];
+ component->component_selector = cs;
+ component->dc_entropy_coding_table_selector =
+ (td_ta >> 4) & 0xf;
+ component->ac_entropy_coding_table_selector =
+ td_ta & 0xf;
+ }
+
+ skip = 3; /* skip Ss, Se, Ah, and Al */
+ } else {
+ skip = len - 2;
+ }
+
+ return jpeg_skip(stream, skip);
+}
+
+/* B.2.4.1 Quantization table-specification syntax */
+static int jpeg_parse_quantization_tables(struct jpeg_stream *stream,
+ u8 precision,
+ struct v4l2_jpeg_reference *tables)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */
+ if (len < 2 + 65)
+ return -EINVAL;
+
+ len -= 2;
+ while (len >= 65) {
+ u8 pq, tq, *qk;
+ int ret;
+ int pq_tq = jpeg_get_byte(stream);
+
+ if (pq_tq < 0)
+ return pq_tq;
+
+ /* quantization table element precision */
+ pq = (pq_tq >> 4) & 0xf;
+ /*
+ * Only 8-bit Qk values for 8-bit sample precision. Extended
+ * sequential DCT with 12-bit sample precision also supports
+ * 16-bit Qk values.
+ */
+ if (pq != 0 && (pq != 1 || precision != 12))
+ return -EINVAL;
+
+ /* quantization table destination identifier */
+ tq = pq_tq & 0xf;
+ if (tq > 3)
+ return -EINVAL;
+
+ /* quantization table element */
+ qk = stream->curr;
+ ret = jpeg_skip(stream, pq ? 128 : 64);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (tables) {
+ tables[tq].start = qk;
+ tables[tq].length = pq ? 128 : 64;
+ }
+
+ len -= pq ? 129 : 65;
+ }
+
+ return 0;
+}
+
+/* B.2.4.2 Huffman table-specification syntax */
+static int jpeg_parse_huffman_tables(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *tables)
+{
+ int mt;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Table B.5 - Huffman table specification parameter sizes and values */
+ if (len < 2 + 17)
+ return -EINVAL;
+
+ for (len -= 2; len >= 17; len -= 17 + mt) {
+ u8 tc, th, *table;
+ int tc_th = jpeg_get_byte(stream);
+ int i, ret;
+
+ if (tc_th < 0)
+ return tc_th;
+
+ /* table class - 0 = DC, 1 = AC */
+ tc = (tc_th >> 4) & 0xf;
+ if (tc > 1)
+ return -EINVAL;
+
+ /* huffman table destination identifier */
+ th = tc_th & 0xf;
+ /* only two Huffman tables for baseline DCT */
+ if (th > 1)
+ return -EINVAL;
+
+ /* BITS - number of Huffman codes with length i */
+ table = stream->curr;
+ mt = 0;
+ for (i = 0; i < 16; i++) {
+ int li;
+
+ li = jpeg_get_byte(stream);
+ if (li < 0)
+ return li;
+
+ mt += li;
+ }
+ /* HUFFVAL - values associated with each Huffman code */
+ ret = jpeg_skip(stream, mt);
+ if (ret < 0)
+ return ret;
+
+ if (tables) {
+ tables[(tc << 1) | th].start = table;
+ tables[(tc << 1) | th].length = stream->curr - table;
+ }
+ }
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/* B.2.4.4 Restart interval definition syntax */
+static int jpeg_parse_restart_interval(struct jpeg_stream *stream,
+ u16 *restart_interval)
+{
+ int len = jpeg_get_word_be(stream);
+ int ri;
+
+ if (len < 0)
+ return len;
+ if (len != 4)
+ return -EINVAL;
+
+ ri = jpeg_get_word_be(stream);
+ if (ri < 0)
+ return ri;
+
+ *restart_interval = ri;
+
+ return 0;
+}
+
+static int jpeg_skip_segment(struct jpeg_stream *stream)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ if (len < 2)
+ return -EINVAL;
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/* Rec. ITU-T T.872 (06/2012) 6.5.3 */
+static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ enum v4l2_jpeg_app14_tf *tf)
+{
+ int ret;
+ int lp;
+ int skip;
+
+ lp = jpeg_get_word_be(stream);
+ if (lp < 0)
+ return lp;
+
+ /* Check for "Adobe\0" in Ap1..6 */
+ if (stream->curr + 6 > stream->end ||
+ strncmp(stream->curr, "Adobe\0", 6))
+ return jpeg_skip(stream, lp - 2);
+
+ /* get to Ap12 */
+ ret = jpeg_skip(stream, 11);
+ if (ret < 0)
+ return ret;
+
+ ret = jpeg_get_byte(stream);
+ if (ret < 0)
+ return ret;
+
+ *tf = ret;
+
+ /* skip the rest of the segment, this ensures at least it is complete */
+ skip = lp - 2 - 11 - 1;
+ return jpeg_skip(stream, skip);
+}
+
+/**
+ * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers
+ * @buf: address of the JPEG buffer, should start with a SOI marker
+ * @len: length of the JPEG buffer
+ * @out: returns marker segment positions and optionally parsed headers
+ *
+ * The out->scan pointer must be initialized to NULL or point to a valid
+ * v4l2_jpeg_scan_header structure. The out->huffman_tables and
+ * out->quantization_tables pointers must be initialized to NULL or point to a
+ * valid array of 4 v4l2_jpeg_reference structures each.
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out)
+{
+ struct jpeg_stream stream;
+ int marker;
+ int ret = 0;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+
+ out->num_dht = 0;
+ out->num_dqt = 0;
+
+ /* the first bytes must be SOI, B.2.1 High-level syntax */
+ if (jpeg_get_word_be(&stream) != SOI)
+ return -EINVAL;
+
+ /* init value to signal if this marker is not present */
+ out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN;
+
+ /* loop through marker segments */
+ while ((marker = jpeg_next_marker(&stream)) >= 0) {
+ switch (marker) {
+ /* baseline DCT, extended sequential DCT */
+ case SOF0 ... SOF1:
+ ret = jpeg_reference_segment(&stream, &out->sof);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_frame_header(&stream, marker,
+ &out->frame);
+ break;
+ /* progressive, lossless */
+ case SOF2 ... SOF3:
+ /* differential coding */
+ case SOF5 ... SOF7:
+ /* arithmetic coding */
+ case SOF9 ... SOF11:
+ case SOF13 ... SOF15:
+ case DAC:
+ case TEM:
+ return -EINVAL;
+
+ case DHT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dht[out->num_dht++ % 4]);
+ if (ret < 0)
+ return ret;
+ if (!out->huffman_tables) {
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ ret = jpeg_parse_huffman_tables(&stream,
+ out->huffman_tables);
+ break;
+ case DQT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dqt[out->num_dqt++ % 4]);
+ if (ret < 0)
+ return ret;
+ if (!out->quantization_tables) {
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ ret = jpeg_parse_quantization_tables(&stream,
+ out->frame.precision,
+ out->quantization_tables);
+ break;
+ case DRI:
+ ret = jpeg_parse_restart_interval(&stream,
+ &out->restart_interval);
+ break;
+ case APP14:
+ ret = jpeg_parse_app14_data(&stream,
+ &out->app14_tf);
+ break;
+ case SOS:
+ ret = jpeg_reference_segment(&stream, &out->sos);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_scan_header(&stream, out->scan);
+ /*
+ * stop parsing, the scan header marks the beginning of
+ * the entropy coded segment
+ */
+ out->ecs_offset = stream.curr - (u8 *)buf;
+ return ret;
+
+ /* markers without parameters */
+ case RST0 ... RST7: /* restart */
+ case SOI: /* start of image */
+ case EOI: /* end of image */
+ break;
+
+ /* skip unknown or unsupported marker segments */
+ default:
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return marker;
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header);
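For illustration, a decoder driver could run the parser over a queued bitstream buffer roughly as below; the example function, the pr_debug() reporting and the way the buffer pointer is obtained are illustrative only:

#include <linux/printk.h>
#include <media/v4l2-jpeg.h>

static int example_parse_jpeg(void *bitstream, size_t size)
{
	struct v4l2_jpeg_scan_header scan_header = {};
	struct v4l2_jpeg_reference huffman_tables[4] = {};
	struct v4l2_jpeg_reference quantization_tables[4] = {};
	struct v4l2_jpeg_header header = {
		.scan = &scan_header,
		.huffman_tables = huffman_tables,
		.quantization_tables = quantization_tables,
	};
	int ret;

	ret = v4l2_jpeg_parse_header(bitstream, size, &header);
	if (ret < 0)
		return ret;

	/*
	 * header.frame now holds precision, width, height and subsampling,
	 * and header.ecs_offset points at the entropy-coded segment.
	 */
	pr_debug("JPEG %ux%u, %u components, ECS at offset %zu\n",
		 header.frame.width, header.frame.height,
		 header.frame.num_components, header.ecs_offset);

	return 0;
}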
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
new file mode 100644
index 000000000000..937d358697e1
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Media Controller ancillary functions
+ *
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
+ * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (c) 2016 Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+
+int v4l2_mc_create_media_graph(struct media_device *mdev)
+{
+ struct media_entity *entity;
+ struct media_entity *if_vid = NULL, *if_aud = NULL;
+ struct media_entity *tuner = NULL, *decoder = NULL;
+ struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
+ bool is_webcam = false;
+ u32 flags;
+ int ret, pad_sink, pad_source;
+
+ if (!mdev)
+ return 0;
+
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_IF_VID_DECODER:
+ if_vid = entity;
+ break;
+ case MEDIA_ENT_F_IF_AUD_DECODER:
+ if_aud = entity;
+ break;
+ case MEDIA_ENT_F_TUNER:
+ tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ decoder = entity;
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ io_v4l = entity;
+ break;
+ case MEDIA_ENT_F_IO_VBI:
+ io_vbi = entity;
+ break;
+ case MEDIA_ENT_F_IO_SWRADIO:
+ io_swradio = entity;
+ break;
+ case MEDIA_ENT_F_CAM_SENSOR:
+ is_webcam = true;
+ break;
+ }
+ }
+
+ /* It should have at least one I/O entity */
+ if (!io_v4l && !io_vbi && !io_swradio) {
+ dev_warn(mdev->dev, "Didn't find any I/O entity\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Here, webcams are modelled in a very simple way: the sensor is
+	 * connected directly to the I/O entity. All the dirty details, like
+	 * scaler and crop hardware, are hidden. While such a mapping is not
+	 * enough for MC-centric hardware, it is enough for the V4L2
+	 * interface centric hardware found in consumer PCs.
+ */
+ if (is_webcam) {
+ if (!io_v4l) {
+ dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n");
+ return -EINVAL;
+ }
+
+ media_device_for_each_entity(entity, mdev) {
+ if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+ ret = media_create_pad_link(entity, 0,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "Failed to create a sensor link\n");
+ return ret;
+ }
+ }
+ if (!decoder)
+ return 0;
+ }
+
+ /* The device isn't a webcam. So, it should have a decoder */
+ if (!decoder) {
+ dev_warn(mdev->dev, "Decoder not found\n");
+ return -EINVAL;
+ }
+
+ /* Link the tuner and IF video output pads */
+ if (tuner) {
+ if (if_vid) {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_vid, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+				dev_warn(mdev->dev, "Couldn't create tuner->PLL link\n");
+ return ret;
+ }
+
+ pad_source = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+				dev_warn(mdev->dev, "couldn't get decoder and/or PLL pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(if_vid, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link PLL to decoder\n");
+ return ret;
+ }
+ } else {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ if (if_aud) {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_AUDIO);
+ pad_sink = media_get_pad_index(if_aud,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_AUDIO);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_aud, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n");
+ return ret;
+ }
+ } else {
+ if_aud = tuner;
+ }
+
+ }
+
+ /* Create demod to V4L, VBI and SDR radio links */
+ if (io_v4l) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n");
+ return ret;
+ }
+ }
+
+ if (io_swradio) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_swradio, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to SDR\n");
+ return ret;
+ }
+ }
+
+ if (io_vbi) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_vbi, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to VBI\n");
+ return ret;
+ }
+ }
+
+ /* Create links for the media connectors */
+ flags = MEDIA_LNK_FL_ENABLED;
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_CONN_RF:
+ if (!tuner)
+ continue;
+ pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(entity, 0, tuner,
+ pad_sink,
+ flags);
+ break;
+ case MEDIA_ENT_F_CONN_SVIDEO:
+ case MEDIA_ENT_F_CONN_COMPOSITE:
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(entity, 0, decoder,
+ pad_sink,
+ flags);
+ break;
+ default:
+ continue;
+ }
+ if (ret)
+ return ret;
+
+ flags = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
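As an illustration, a bridge driver would typically call this helper once, after all of its sub-device entities have been registered and before registering the media device. The example_dev structure and its members are hypothetical:

#include <media/media-device.h>
#include <media/v4l2-mc.h>

static int example_register_media(struct example_dev *dev)
{
	int ret;

	/* All entities (tuner, decoder, I/O) are already registered here. */
	ret = v4l2_mc_create_media_graph(&dev->media_dev);
	if (ret) {
		dev_warn(dev->dev, "failed to create media graph: %d\n", ret);
		return ret;
	}

	return media_device_register(&dev->media_dev);
}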
+
+int v4l_enable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+ int ret = 0, err;
+
+ if (!mdev)
+ return 0;
+
+ mutex_lock(&mdev->graph_mutex);
+ if (!mdev->enable_source)
+ goto end;
+ err = mdev->enable_source(&vdev->entity, &vdev->pipe);
+ if (err)
+ ret = -EBUSY;
+end:
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l_enable_media_source);
+
+void v4l_disable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+
+ if (mdev) {
+ mutex_lock(&mdev->graph_mutex);
+ if (mdev->disable_source)
+ mdev->disable_source(&vdev->entity);
+ mutex_unlock(&mdev->graph_mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_disable_media_source);
+
+int v4l_vb2q_enable_media_source(struct vb2_queue *q)
+{
+ struct v4l2_fh *fh = q->owner;
+
+ if (fh && fh->vdev)
+ return v4l_enable_media_source(fh->vdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
+
+int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ struct media_pad *sink, u32 flags)
+{
+ struct fwnode_handle *endpoint;
+
+ if (!(sink->flags & MEDIA_PAD_FL_SINK))
+ return -EINVAL;
+
+ fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) {
+ struct fwnode_handle *remote_ep;
+ int src_idx, sink_idx, ret;
+ struct media_pad *src;
+
+ src_idx = media_entity_get_fwnode_pad(&src_sd->entity,
+ endpoint,
+ MEDIA_PAD_FL_SOURCE);
+ if (src_idx < 0) {
+ dev_dbg(src_sd->dev, "no source pad found for %pfw\n",
+ endpoint);
+ continue;
+ }
+
+ remote_ep = fwnode_graph_get_remote_endpoint(endpoint);
+ if (!remote_ep) {
+ dev_dbg(src_sd->dev, "no remote ep found for %pfw\n",
+ endpoint);
+ continue;
+ }
+
+ /*
+ * ask the sink to verify it owns the remote endpoint,
+ * and translate to a sink pad.
+ */
+ sink_idx = media_entity_get_fwnode_pad(sink->entity,
+ remote_ep,
+ MEDIA_PAD_FL_SINK);
+ fwnode_handle_put(remote_ep);
+
+ if (sink_idx < 0 || sink_idx != sink->index) {
+ dev_dbg(src_sd->dev,
+ "sink pad index mismatch or error (is %d, expected %u)\n",
+ sink_idx, sink->index);
+ continue;
+ }
+
+ /*
+ * the source endpoint corresponds to one of its source pads,
+ * the source endpoint connects to an endpoint at the sink
+ * entity, and the sink endpoint corresponds to the sink
+ * pad requested, so we have found an endpoint connection
+ * that works, create the media link for it.
+ */
+
+ src = &src_sd->entity.pads[src_idx];
+
+ /* skip if link already exists */
+ if (media_entity_find_link(src, sink)) {
+ dev_dbg(src_sd->dev,
+ "link %s:%d -> %s:%d already exists\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx);
+ continue;
+ }
+
+ dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx);
+
+ ret = media_create_pad_link(&src_sd->entity, src_idx,
+ sink->entity, sink_idx, flags);
+ if (ret) {
+ dev_err(src_sd->dev,
+ "link %s:%d -> %s:%d failed with %d\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx, ret);
+
+ fwnode_handle_put(endpoint);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad);
+
+int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
+ struct v4l2_subdev *sink_sd)
+{
+ unsigned int i;
+
+ for (i = 0; i < sink_sd->entity.num_pads; i++) {
+ struct media_pad *pad = &sink_sd->entity.pads[i];
+ int ret;
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links);
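A typical caller is a v4l2-async notifier .bound() callback, once the sub-device providing the source endpoints has probed. The sketch below uses the older v4l2_async_subdev form of that callback and a hypothetical example_bridge structure; newer kernels pass a v4l2_async_connection instead:

#include <media/v4l2-async.h>
#include <media/v4l2-mc.h>

static int example_notify_bound(struct v4l2_async_notifier *notifier,
				struct v4l2_subdev *source_sd,
				struct v4l2_async_subdev *asd)
{
	struct example_bridge *bridge =
		container_of(notifier, struct example_bridge, notifier);

	/*
	 * Create one media link per firmware endpoint connection between
	 * source_sd and the sink pads of the bridge's own sub-device.
	 */
	return v4l2_create_fwnode_links(source_sd, &bridge->sd);
}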
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this use the entity use_count field to track the number of users.
+ * For entities corresponding to video device nodes the use_count field stores
+ * the users count of the node. For entities corresponding to subdevs the
+ * use_count field stores the total number of users of all video device nodes
+ * in the pipeline.
+ *
+ * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and
+ * close() handlers of video device nodes. They increment or decrement the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
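In practice a video node's open() and release() handlers bracket the pipeline with these calls, roughly as in this minimal fops sketch (driver-specific setup omitted):

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mc.h>

static int example_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	int ret;

	ret = v4l2_fh_open(file);
	if (ret)
		return ret;

	ret = v4l2_pipeline_pm_get(&vdev->entity);
	if (ret)
		v4l2_fh_release(file);

	return ret;
}

static int example_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	v4l2_pipeline_pm_put(&vdev->entity);

	return v4l2_fh_release(file);
}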
+
+/*
+ * pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int pipeline_pm_use_count(struct media_entity *entity,
+ struct media_graph *graph)
+{
+ int use = 0;
+
+ media_graph_walk_start(graph, entity);
+
+ while ((entity = media_graph_walk_next(graph))) {
+ if (is_media_entity_v4l2_video_device(entity))
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = is_media_entity_v4l2_subdev(entity)
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power(struct media_entity *entity, int change,
+ struct media_graph *graph)
+{
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_graph_walk_start(graph, entity);
+
+ while (!ret && (entity = media_graph_walk_next(graph)))
+ if (is_media_entity_v4l2_subdev(entity))
+ ret = pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return ret;
+
+ media_graph_walk_start(graph, first);
+
+ while ((first = media_graph_walk_next(graph))
+ && first != entity)
+ if (is_media_entity_v4l2_subdev(first))
+ pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
+}
+
+int v4l2_pipeline_pm_get(struct media_entity *entity)
+{
+ return v4l2_pipeline_pm_use(entity, 1);
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get);
+
+void v4l2_pipeline_pm_put(struct media_entity *entity)
+{
+ /* Powering off entities shouldn't fail. */
+ WARN_ON(v4l2_pipeline_pm_use(entity, 0));
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put);
+
+int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use;
+ int sink_use;
+ int ret = 0;
+
+ source_use = pipeline_pm_use_count(source, graph);
+ sink_use = pipeline_pm_use_count(sink, graph);
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ pipeline_pm_power(source, -sink_use, graph);
+ pipeline_pm_power(sink, -source_use, graph);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+
+ ret = pipeline_pm_power(source, sink_use, graph);
+ if (ret < 0)
+ return ret;
+
+ ret = pipeline_pm_power(sink, source_use, graph);
+ if (ret < 0)
+ pipeline_pm_power(source, -sink_use, graph);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify);
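Drivers that want this behaviour simply plug the helper into their media device operations; a minimal sketch:

#include <media/media-device.h>
#include <media/v4l2-mc.h>

static const struct media_device_ops example_media_ops = {
	.link_notify = v4l2_pipeline_link_notify,
};

static void example_setup_media_dev(struct media_device *mdev)
{
	/* Let the helper adjust power state whenever links are (de)activated. */
	mdev->ops = &example_media_ops;
}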
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e96497f7c3ed..fec93c1a9231 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1,29 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ * Memory-to-memory device framework for Video for Linux 2 and vb2.
*
- * Helper functions for devices that use videobuf buffers for both their
+ * Helper functions for devices that use vb2 buffers for both their
* source and destination.
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <media/videobuf2-core.h>
+#include <media/media-device.h>
+#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
@@ -41,6 +39,12 @@ module_param(debug, bool, 0644);
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
+
+/* The job queue is not running new jobs */
+#define QUEUE_PAUSED (1 << 0)
/* Offset base for buffers on the destination queue - used to distinguish
@@ -48,19 +52,61 @@ module_param(debug, bool, 0644);
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
+enum v4l2_m2m_entity_type {
+ MEM2MEM_ENT_TYPE_SOURCE,
+ MEM2MEM_ENT_TYPE_SINK,
+ MEM2MEM_ENT_TYPE_PROC
+};
+
+static const char * const m2m_entity_name[] = {
+ "source",
+ "sink",
+ "proc"
+};
/**
* struct v4l2_m2m_dev - per-device context
+ * @source: &struct media_entity pointer with the source entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @source_pad: &struct media_pad with the source pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink:		&struct media_entity with the sink entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink_pad: &struct media_pad with the sink pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @proc:		&struct media_entity with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * Used only when the M2M device is registered via
+ *			v4l2_m2m_register_media_controller().
+ * @intf_devnode: &struct media_intf_devnode pointer with the interface
+ *		  that controls the M2M device.
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
struct v4l2_m2m_ctx *curr_ctx;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_entity *source;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+#endif
struct list_head job_queue;
spinlock_t job_spinlock;
+ struct work_struct job_work;
+ unsigned long job_queue_flags;
const struct v4l2_m2m_ops *m2m_ops;
};
@@ -74,28 +120,16 @@ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
return &m2m_ctx->cap_q_ctx;
}
-/**
- * v4l2_m2m_get_vq() - return vb2_queue for the given type
- */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
- struct v4l2_m2m_queue_ctx *q_ctx;
-
- q_ctx = get_queue_ctx(m2m_ctx, type);
- if (!q_ctx)
- return NULL;
-
- return &q_ctx->q;
+ return &get_queue_ctx(m2m_ctx, type)->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
-/**
- * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
- */
-void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_buffer *b = NULL;
+ struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
@@ -111,13 +145,27 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
-/**
- * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
- * return it
- */
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
+struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
- struct v4l2_m2m_buffer *b = NULL;
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
+
+struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
@@ -134,14 +182,47 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
+
+struct vb2_v4l2_buffer *
+v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
+{
+ struct v4l2_m2m_buffer *b, *tmp;
+ struct vb2_v4l2_buffer *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
+ if (b->vb.vb2_buf.index == idx) {
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ ret = &b->vb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
+
/*
* Scheduling handlers
*/
-/**
- * v4l2_m2m_get_curr_priv() - return driver private data for the currently
- * running instance or NULL if no instance is running
- */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
@@ -158,8 +239,12 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
* v4l2_m2m_try_run() - select next job to perform and run it if possible
+ * @m2m_dev: per-device context
*
* Get next transaction (if present) from the waiting jobs list and run it.
+ *
+ * Note that this function can be called for a given v4l2_m2m_ctx context,
+ * but may end up calling .device_run for another context.
*/
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
@@ -178,157 +263,334 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
return;
}
+ if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running new jobs is paused\n");
+ return;
+ }
+
m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
struct v4l2_m2m_ctx, queue);
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
-/**
- * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
- * the pending job queue and add it if so.
- * @m2m_ctx: m2m context assigned to the instance to be checked
+/*
+ * __v4l2_m2m_try_queue() - queue a job
+ * @m2m_dev: m2m device
+ * @m2m_ctx: m2m context
*
- * There are three basic requirements an instance has to meet to be able to run:
- * 1) at least one source buffer has to be queued,
- * 2) at least one destination buffer has to be queued,
- * 3) streaming has to be on.
+ * Check if this context is ready to queue a job.
*
- * There may also be additional, custom requirements. In such case the driver
- * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
- * return 1 if the instance is ready.
- * An example of the above could be an instance that requires more than one
- * src/dst buffer per transaction.
+ * This function can run in interrupt context.
*/
-static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
- m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
- if (!m2m_ctx->out_q_ctx.q.streaming
- || !m2m_ctx->cap_q_ctx.q.streaming) {
- dprintk("Streaming needs to be on for both queues\n");
+ if (!m2m_ctx->out_q_ctx.q.streaming ||
+ (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
+ if (!m2m_ctx->ignore_cap_streaming)
+ dprintk("Streaming needs to be on for both queues\n");
+ else
+ dprintk("Streaming needs to be on for the OUTPUT queue\n");
return;
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ dprintk("Aborted context\n");
+ goto job_unlock;
+ }
+
if (m2m_ctx->job_flags & TRANS_QUEUED) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
- return;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- return;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
- flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- return;
+ goto job_unlock;
+ }
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dprintk("Timestamp mismatch, returning held capture buffer\n");
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
+
+ if (m2m_ctx->has_stopped) {
+ dprintk("Device has stopped\n");
+ goto job_unlock;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("Driver not ready\n");
- return;
+ goto job_unlock;
}
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
+job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job. If suitable,
+ * run the next queued job on the mem2mem device.
+ *
+ * This function shouldn't run in interrupt context.
+ *
+ * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
+ * and then run another job for another context.
+ */
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
v4l2_m2m_try_run(m2m_dev);
}
+EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
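The core already calls v4l2_m2m_try_schedule() from v4l2_m2m_qbuf(), but a driver may also call it explicitly when some internal condition changes, for instance from its vb2 .buf_queue() callback. A sketch, assuming a hypothetical driver context that embeds a struct v4l2_fh named fh:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void example_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);

	/* Re-evaluate scheduling if a driver-specific condition just changed. */
	v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
}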
/**
- * v4l2_m2m_job_finish() - inform the framework that a job has been finished
- * and have it clean up
- *
- * Called by a driver to yield back the device after it has finished with it.
- * Should be called as soon as possible after reaching a state which allows
- * other instances to take control of the device.
+ * v4l2_m2m_device_run_work() - run pending jobs for the context
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void v4l2_m2m_device_run_work(struct work_struct *work)
+{
+ struct v4l2_m2m_dev *m2m_dev =
+ container_of(work, struct v4l2_m2m_dev, job_work);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+
+/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ * @m2m_ctx: m2m context with jobs to be canceled
*
- * This function has to be called only after device_run() callback has been
- * called on the driver. To prevent recursion, it should not be called directly
- * from the device_run() callback though.
+ * In case streamoff or release is called on any context:
+ * 1] If the context is currently running, the job_abort callback is invoked
+ *    and we wait for the job to complete.
+ * 2] If the context is queued, the context is removed from the job_queue.
*/
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
+ struct v4l2_m2m_dev *m2m_dev;
unsigned long flags;
+ m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
- if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->m2m_ops->job_abort)
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
+ wait_event(m2m_ctx->finished,
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+}
+
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
+
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
+ */
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- v4l2_m2m_try_schedule(m2m_ctx);
- v4l2_m2m_try_run(m2m_dev);
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
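The usual completion path for a driver that does not hold capture buffers looks roughly like the interrupt handler below; example_dev and example_ctx are hypothetical, with the context assumed to embed a struct v4l2_fh named fh:

#include <linux/interrupt.h>
#include <media/v4l2-mem2mem.h>

static irqreturn_t example_irq(int irq, void *priv)
{
	struct example_dev *dev = priv;
	struct example_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

	/* Yield the device; must not be called from .device_run() itself. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);

	return IRQ_HANDLED;
}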
-/**
- * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
- */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ /*
+ * If the request API is being used, returning the OUTPUT
+ * (src) buffer will wake-up any process waiting on the
+ * request file descriptor.
+ *
+ * Therefore, return the CAPTURE (dst) buffer first,
+ * to avoid signalling the request file descriptor
+ * before the CAPTURE buffer is done.
+ */
+ v4l2_m2m_buf_done(src_buf, state);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags |= QUEUE_PAUSED;
+ curr_ctx = m2m_dev->curr_ctx;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (curr_ctx)
+ wait_event(curr_ctx->finished,
+ !(curr_ctx->job_flags & TRANS_RUNNING));
+}
+EXPORT_SYMBOL(v4l2_m2m_suspend);
+
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_resume);
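These two helpers pair naturally with the driver's system sleep callbacks; a minimal sketch, assuming the m2m device pointer is reachable from drvdata of a hypothetical example_dev:

#include <linux/pm.h>
#include <media/v4l2-mem2mem.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	/* Waits for a running job to finish and pauses new ones. */
	v4l2_m2m_suspend(edev->m2m_dev);

	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	/* Unpauses the queue and kicks the next pending job, if any. */
	v4l2_m2m_resume(edev->m2m_dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);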
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
- return vb2_reqbufs(vq, reqbufs);
+ ret = vb2_reqbufs(vq, reqbufs);
+	/* If count == 0, then the owner has released all buffers and is
+	 * no longer the owner of the queue. Otherwise we have an owner. */
+ if (ret == 0)
+ vq->owner = reqbufs->count ? file->private_data : NULL;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
-/**
- * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
- *
- * See v4l2_m2m_mmap() documentation for details.
- */
-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_buffer *buf)
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
{
- struct vb2_queue *vq;
- int ret = 0;
- unsigned int i;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_querybuf(vq, buf);
-
/* Adjust MMAP memory offsets for the CAPTURE queue */
- if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
@@ -336,48 +598,236 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
+}
- return ret;
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
-/**
- * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
- * the type
+/*
+ * This will add the LAST flag and mark the buffer management
+ * state as stopped.
+ * This is called when the last capture buffer must be flagged as LAST
+ * in draining mode from the encoder/decoder driver buf_queue() callback
+ * or from v4l2_update_last_buf_state() when a capture buffer is available.
+ */
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_mark_stopped(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
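A decoder's CAPTURE .buf_queue() callback typically uses this together with the v4l2_m2m_dst_buf_is_last() helper to return an empty, LAST-flagged buffer while draining; a sketch under the same hypothetical context layout as above:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void example_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
		/* Draining: return this buffer empty, flagged as LAST. */
		vb2_set_plane_payload(vb, 0, 0);
		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
		return;
	}

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}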
+
+/* When stop command is issued, update buffer management state */
+static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ return 0;
+
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
+ m2m_ctx->is_draining = true;
+
+ /*
+ * The processing of the last output buffer queued before
+ * the STOP command is expected to mark the buffer management
+ * state as stopped with v4l2_m2m_mark_stopped().
+ */
+ if (m2m_ctx->last_src_buf)
+ return 0;
+
+ /*
+ * In case the output queue is empty, try to mark the last capture
+ * buffer as LAST.
+ */
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf) {
+ /*
+ * Wait for the next queued one in encoder/decoder driver
+ * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
+ * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
+ * streaming.
+ */
+ m2m_ctx->next_buf_last = true;
+ return 0;
+ }
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
+
+ return 0;
+}
+
+/*
+ * Updates the encoding/decoding buffer management state, should
+ * be called from encoder/decoder drivers start_streaming()
*/
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ /* If start streaming again, untag the last output buffer */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ m2m_ctx->last_src_buf = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
+
+/*
+ * Updates the encoding/decoding buffer management state, should
+ * be called from encoder/decoder driver stop_streaming()
+ */
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+		 * If in draining state, either mark the next dst buffer as
+		 * done right away, or flag the next queued one to be marked
+		 * as done later, either in the encoder/decoder driver
+		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
+		 * helper or in v4l2_m2m_qbuf() if the encoder/decoder is not
+		 * yet streaming.
+ */
+ if (m2m_ctx->is_draining) {
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ m2m_ctx->last_src_buf = NULL;
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf)
+ m2m_ctx->next_buf_last = true;
+ else
+ v4l2_m2m_last_buffer_done(m2m_ctx,
+ next_dst_buf);
+ }
+ } else {
+ v4l2_m2m_clear_state(m2m_ctx);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
+
+static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned int i;
+
+ if (WARN_ON(q->is_output))
+ return;
+ if (list_empty(&q->queued_list))
+ return;
+
+ vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ /*
+ * Since the buffer hasn't been queued to the ready queue,
+	 * mark it active and owned before marking it LAST and DONE
+ */
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->owned_by_drv_count);
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ vbuf->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
+}
+
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_qbuf(vq, buf);
- if (!ret)
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ dprintk("%s: requests cannot be used with capture buffers\n",
+ __func__);
+ return -EPERM;
+ }
+
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ /*
+ * If the capture queue is streaming, but streaming hasn't started
+ * on the device, but was asked to stop, mark the previously queued
+ * buffer as DONE with LAST flag since it won't be queued on the
+ * device.
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
+ vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
+ (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
+ v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
+ else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
-/**
- * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
-/**
- * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
- * on the type
- */
+int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
+
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_create_buffers *create)
{
@@ -388,10 +838,6 @@ int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
-/**
- * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_exportbuffer *eb)
{
@@ -401,9 +847,7 @@ int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
-/**
- * v4l2_m2m_streamon() - turn on streaming for a video queue
- */
+
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
@@ -419,9 +863,6 @@ int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
-/**
- * v4l2_m2m_streamoff() - turn off streaming for a video queue
- */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
@@ -430,6 +871,9 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
unsigned long flags_job, flags;
int ret;
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
q_ctx = get_queue_ctx(m2m_ctx, type);
ret = vb2_streamoff(&q_ctx->q, type);
if (ret)
@@ -438,13 +882,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
/* We should not be scheduled anymore, since we're dropping a queue. */
- INIT_LIST_HEAD(&m2m_ctx->queue);
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
m2m_ctx->job_flags = 0;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
/* Drop queue, since streamoff returns device to the same state as after
* calling reqbufs. */
INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
if (m2m_dev->curr_ctx == m2m_ctx) {
@@ -457,35 +903,14 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-/**
- * v4l2_m2m_poll() - poll replacement, for destination buffers only
- *
- * Call from the driver's poll() function. Will poll both queues. If a buffer
- * is available to dequeue (with dqbuf) from the source queue, this will
- * indicate that a non-blocking write can be performed, while read will be
- * returned in case of the destination queue.
- */
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct poll_table_struct *wait)
+static __poll_t v4l2_m2m_poll_for_data(struct file *file,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
{
- struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
- struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- if (v4l2_event_pending(fh))
- rc = POLLPRI;
- else if (req_events & POLLPRI)
- poll_wait(file, &fh->wait, wait);
- if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
- return rc;
- }
-
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
@@ -494,56 +919,59 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* means either in driver already or waiting for driver to claim it
* and start processing.
*/
- if ((!src_q->streaming || list_empty(&src_q->queued_list))
- && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
- rc |= POLLERR;
- goto end;
- }
-
- if (m2m_ctx->m2m_dev->m2m_ops->unlock)
- m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
-
- if (list_empty(&src_q->done_list))
- poll_wait(file, &src_q->done_wq, wait);
- if (list_empty(&dst_q->done_list))
- poll_wait(file, &dst_q->done_wq, wait);
-
- if (m2m_ctx->m2m_dev->m2m_ops->lock)
- m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+ if ((!vb2_is_streaming(src_q) || src_q->error ||
+ list_empty(&src_q->queued_list)) &&
+ (!vb2_is_streaming(dst_q) || dst_q->error ||
+ (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
+ return EPOLLERR;
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
- src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
- done_entry);
- if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
- || src_vb->state == VB2_BUF_STATE_ERROR))
- rc |= POLLOUT | POLLWRNORM;
+ rc |= EPOLLOUT | EPOLLWRNORM;
spin_unlock_irqrestore(&src_q->done_lock, flags);
spin_lock_irqsave(&dst_q->done_lock, flags);
- if (!list_empty(&dst_q->done_list))
- dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
- done_entry);
- if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
- || dst_vb->state == VB2_BUF_STATE_ERROR))
- rc |= POLLIN | POLLRDNORM;
+ /*
+ * If the last buffer was dequeued from the capture queue, signal
+ * userspace. DQBUF(CAPTURE) will return -EPIPE.
+ */
+ if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
+ rc |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&dst_q->done_lock, flags);
-end:
+ return rc;
+}
+
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+
+ /*
+ * poll_wait() MUST be called on the first invocation on all the
+ * potential queues of interest, even if we are not interested in their
+	 * events during this first call. Failure to do so will result in a
+	 * queue's events being ignored, because the poll_table won't be able
+	 * to add new wait queues thereafter.
+ */
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+
+ if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
+ rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ rc |= EPOLLPRI;
+
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
-/**
- * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
- *
- * Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
- * thus applications) receive modified offsets.
- */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma)
{
@@ -561,17 +989,206 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
-/**
- * v4l2_m2m_init() - initialize per-driver m2m data
- *
- * Usually called from driver's probe() function.
- */
+#ifndef CONFIG_MMU
+unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ unsigned long offset = pgoff << PAGE_SHIFT;
+ struct vb2_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
+ pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
+#endif
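
The helpers above, together with the "Adjust MMAP memory offsets" calls earlier, rely on one offset convention: CAPTURE buffers are reported to userspace with DST_QUEUE_OFF_BASE added to their mem_offset, so a single mmap space can serve both queues. A minimal sketch of the demultiplexing side, for illustration only (example_pick_queue() is not part of the patch):

/*
 * Sketch only: recover the queue and the per-queue offset from a
 * multiplexed mmap offset, mirroring what v4l2_m2m_mmap() and
 * v4l2_m2m_get_unmapped_area() do above.
 */
static struct vb2_queue *example_pick_queue(struct v4l2_m2m_ctx *m2m_ctx,
					    unsigned long *offset)
{
	if (*offset < DST_QUEUE_OFF_BASE)
		return v4l2_m2m_get_src_vq(m2m_ctx);

	*offset -= DST_QUEUE_OFF_BASE;
	return v4l2_m2m_get_dst_vq(m2m_ctx);
}
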
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+ media_devnode_remove(m2m_dev->intf_devnode);
+
+ media_entity_remove_links(m2m_dev->source);
+ media_entity_remove_links(&m2m_dev->sink);
+ media_entity_remove_links(&m2m_dev->proc);
+ media_device_unregister_entity(m2m_dev->source);
+ media_device_unregister_entity(&m2m_dev->sink);
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->source->name);
+ kfree(m2m_dev->sink.name);
+ kfree(m2m_dev->proc.name);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
+
+static int v4l2_m2m_register_entity(struct media_device *mdev,
+ struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
+ struct video_device *vdev, int function)
+{
+ struct media_entity *entity;
+ struct media_pad *pads;
+ char *name;
+ unsigned int len;
+ int num_pads;
+ int ret;
+
+ switch (type) {
+ case MEM2MEM_ENT_TYPE_SOURCE:
+ entity = m2m_dev->source;
+ pads = &m2m_dev->source_pad;
+ pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_SINK:
+ entity = &m2m_dev->sink;
+ pads = &m2m_dev->sink_pad;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_PROC:
+ entity = &m2m_dev->proc;
+ pads = m2m_dev->proc_pads;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (type != MEM2MEM_ENT_TYPE_PROC) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+ len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
+ return ret;
+ }
+ ret = media_device_register_entity(mdev, entity);
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ struct media_device *mdev = vdev->v4l2_dev->mdev;
+ struct media_link *link;
+ int ret;
+
+ if (!mdev)
+ return 0;
+
+	/* A memory-to-memory device consists of two DMA engine entities
+	 * and one video processing entity. The DMA engine entities are
+	 * linked to a V4L interface.
+	 */
+
+ /* Create the three entities with their pads */
+ m2m_dev->source = &vdev->entity;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ return ret;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_PROC, vdev, function);
+ if (ret)
+ goto err_rel_entity0;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ m2m_dev->intf_devnode = media_devnode_create(mdev,
+ MEDIA_INTF_T_V4L_VIDEO, 0,
+ VIDEO_MAJOR, vdev->minor);
+ if (!m2m_dev->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(m2m_dev->source,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&m2m_dev->sink,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_intf_link;
+ }
+ return 0;
+
+err_rm_intf_link:
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+err_rm_devnode:
+ media_devnode_remove(m2m_dev->intf_devnode);
+err_rm_links1:
+ media_entity_remove_links(&m2m_dev->sink);
+err_rm_links0:
+ media_entity_remove_links(&m2m_dev->proc);
+ media_entity_remove_links(m2m_dev->source);
+err_rel_entity2:
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->proc.name);
+err_rel_entity1:
+ media_device_unregister_entity(&m2m_dev->sink);
+ kfree(m2m_dev->sink.name);
+err_rel_entity0:
+ media_device_unregister_entity(m2m_dev->source);
+ kfree(m2m_dev->source->name);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
+#endif
+
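
For context, a driver is expected to call the registration helper above once its video device has been registered; a minimal sketch, where example_dev, its m2m_dev/vfd/v4l2_dev members and the scaler entity function are assumptions, not taken from this patch:

/* Sketch only, not part of the patch. */
static int example_register_mc(struct example_dev *dev)
{
	int ret;

	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret)
		v4l2_err(&dev->v4l2_dev, "media controller registration failed\n");

	return ret;
}
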
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
- WARN_ON(!m2m_ops->job_abort))
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
return ERR_PTR(-EINVAL);
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
@@ -582,31 +1199,18 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
m2m_dev->m2m_ops = m2m_ops;
INIT_LIST_HEAD(&m2m_dev->job_queue);
spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
-/**
- * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
- *
- * Usually called from driver's remove() function.
- */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
-/**
- * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
- * @priv - driver's instance private data
- * @m2m_dev - a previously initialized m2m_dev struct
- * @vq_init - a callback for queue type-specific initialization function to be
- * used for initializing videobuf_queues
- *
- * Usually called from driver's open() function.
- */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
@@ -637,6 +1241,15 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
if (ret)
goto err;
+ /*
+	 * Both queues should use the same mutex to lock the m2m context.
+ * This lock is used in some v4l2_m2m_* helpers.
+ */
+ if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
@@ -645,34 +1258,10 @@ err:
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
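
With the WARN_ON added above, sharing one mutex between the output and capture queues becomes a hard requirement on the queue_init callback. A minimal sketch of a conforming callback; example_ctx, example_dev and its mutex member are illustrative assumptions:

static int example_queue_init(void *priv, struct vb2_queue *src_vq,
			      struct vb2_queue *dst_vq)
{
	struct example_ctx *ctx = priv;
	int ret;

	/* ... fill in type, io_modes, ops, mem_ops, drv_priv, etc. ... */
	src_vq->lock = &ctx->dev->mutex;
	dst_vq->lock = &ctx->dev->mutex;	/* must be the same mutex */

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	return vb2_queue_init(dst_vq);
}
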
-/**
- * v4l2_m2m_ctx_release() - release m2m context
- *
- * Usually called from driver's release() function.
- */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags;
-
- m2m_dev = m2m_ctx->m2m_dev;
-
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
- if (m2m_ctx->job_flags & TRANS_RUNNING) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
- wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
- } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
- list_del(&m2m_ctx->queue);
- m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- dprintk("m2m_ctx: %p had been on queue and was removed\n",
- m2m_ctx);
- } else {
- /* Do nothing, was not on queue/running */
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- }
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
@@ -681,20 +1270,15 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
-/**
- * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
- *
- * Call from buf_queue(), videobuf_queue_ops callback.
- */
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
{
- struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
+ struct v4l2_m2m_buffer *b = container_of(vbuf,
+ struct v4l2_m2m_buffer, vb);
struct v4l2_m2m_queue_ctx *q_ctx;
unsigned long flags;
- q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
- if (!q_ctx)
- return;
+ q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
list_add_tail(&b->list, &q_ctx->rdy_queue);
@@ -703,3 +1287,338 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
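
With the new vb2_v4l2_buffer based prototype, a driver's vb2 .buf_queue handler reduces to a straight pass-through; a minimal sketch, where example_ctx and its fh member are assumptions:

static void example_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Add the buffer to the ready list of its own (OUTPUT or CAPTURE) queue */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
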
+void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
+ struct vb2_v4l2_buffer *cap_vb)
+{
+ const u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
+
+ if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ cap_vb->timecode = out_vb->timecode;
+ cap_vb->field = out_vb->field;
+ cap_vb->flags &= ~mask;
+ cap_vb->flags |= out_vb->flags & mask;
+ cap_vb->vb2_buf.copied_timestamp = 1;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
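
The metadata helper above is meant to be called before the capture buffer is completed, typically from the driver's device_run() or interrupt handler; a minimal sketch (example_ctx and the hardware start are placeholders):

static void example_device_run(void *priv)
{
	struct example_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Propagate timestamp, timecode, field and TSTAMP_SRC flags */
	v4l2_m2m_buf_copy_metadata(src, dst);

	/* ... program and start the hardware here ... */
}
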
+
+void v4l2_m2m_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
+ struct vb2_buffer *vb;
+
+ if (!obj->ops->queue)
+ continue;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ /* Sanity checks */
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+ m2m_ctx_obj = container_of(vb->vb2_queue,
+ struct v4l2_m2m_ctx,
+ out_q_ctx.q);
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+ m2m_ctx = m2m_ctx_obj;
+ }
+
+ /*
+ * The buffer we queue here can in theory be immediately
+ * unbound, hence the use of list_for_each_entry_safe()
+ * above and why we call the queue op last.
+ */
+ obj->ops->queue(obj);
+ }
+
+ WARN_ON(!m2m_ctx);
+
+ if (m2m_ctx)
+ v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
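
v4l2_m2m_request_queue() is designed to be plugged into the media device's req_queue operation; a minimal sketch of that wiring, assuming vb2_request_validate() as the validation hook:

static const struct media_device_ops example_m2m_media_ops = {
	.req_validate	= vb2_request_validate,
	.req_queue	= v4l2_m2m_request_queue,
};
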
+
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
+ struct v4l2_remove_buffers *remove)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);
+
+ if (q->type != remove->type)
+ return -EINVAL;
+
+ return vb2_core_remove_bufs(q, remove->index, remove->count);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ ec->flags = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
+
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP) {
+ dc->stop.pts = 0;
+ } else if (dc->cmd == V4L2_DEC_CMD_START) {
+ dc->start.speed = 0;
+ dc->start.format = V4L2_DEC_START_FMT_NONE;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+
+/*
+ * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START.
+ * Should be called from the encoder driver's encoder_cmd() callback.
+ */
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
+
+/*
+ * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START.
+ * Should be called from the decoder driver's decoder_cmd() callback.
+ */
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
+
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
+
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock is
+ * used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+
+ return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ __poll_t ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
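
Taken together, the v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers above let a mem2mem driver delegate most buffer handling; a minimal sketch of the wiring, with only a subset of the ioctl ops shown and the example_* open/release handlers assumed:

static const struct v4l2_file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,		/* driver-specific */
	.release	= example_release,	/* driver-specific */
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};

static const struct v4l2_ioctl_ops example_ioctl_ops = {
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};
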
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
deleted file mode 100644
index aa59639d013c..000000000000
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * V4L2 OF binding parsing library
- *
- * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
- * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * Copyright (C) 2012 Renesas Electronics Corp.
- * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <media/v4l2-of.h>
-
-static void v4l2_of_parse_csi_bus(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
- u32 data_lanes[ARRAY_SIZE(bus->data_lanes)];
- struct property *prop;
- bool have_clk_lane = false;
- unsigned int flags = 0;
- u32 v;
-
- prop = of_find_property(node, "data-lanes", NULL);
- if (prop) {
- const __be32 *lane = NULL;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(data_lanes); i++) {
- lane = of_prop_next_u32(prop, lane, &data_lanes[i]);
- if (!lane)
- break;
- }
- bus->num_data_lanes = i;
- while (i--)
- bus->data_lanes[i] = data_lanes[i];
- }
-
- if (!of_property_read_u32(node, "clock-lanes", &v)) {
- bus->clock_lane = v;
- have_clk_lane = true;
- }
-
- if (of_get_property(node, "clock-noncontinuous", &v))
- flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
- else if (have_clk_lane || bus->num_data_lanes > 0)
- flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
-
- bus->flags = flags;
- endpoint->bus_type = V4L2_MBUS_CSI2;
-}
-
-static void v4l2_of_parse_parallel_bus(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- struct v4l2_of_bus_parallel *bus = &endpoint->bus.parallel;
- unsigned int flags = 0;
- u32 v;
-
- if (!of_property_read_u32(node, "hsync-active", &v))
- flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
- V4L2_MBUS_HSYNC_ACTIVE_LOW;
-
- if (!of_property_read_u32(node, "vsync-active", &v))
- flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
- V4L2_MBUS_VSYNC_ACTIVE_LOW;
-
- if (!of_property_read_u32(node, "pclk-sample", &v))
- flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
- V4L2_MBUS_PCLK_SAMPLE_FALLING;
-
- if (!of_property_read_u32(node, "field-even-active", &v))
- flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
- V4L2_MBUS_FIELD_EVEN_LOW;
- if (flags)
- endpoint->bus_type = V4L2_MBUS_PARALLEL;
- else
- endpoint->bus_type = V4L2_MBUS_BT656;
-
- if (!of_property_read_u32(node, "data-active", &v))
- flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
- V4L2_MBUS_DATA_ACTIVE_LOW;
-
- if (of_get_property(node, "slave-mode", &v))
- flags |= V4L2_MBUS_SLAVE;
- else
- flags |= V4L2_MBUS_MASTER;
-
- if (!of_property_read_u32(node, "bus-width", &v))
- bus->bus_width = v;
-
- if (!of_property_read_u32(node, "data-shift", &v))
- bus->data_shift = v;
-
- bus->flags = flags;
-
-}
-
-/**
- * v4l2_of_parse_endpoint() - parse all endpoint node properties
- * @node: pointer to endpoint device_node
- * @endpoint: pointer to the V4L2 OF endpoint data structure
- *
- * All properties are optional. If none are found, we don't set any flags.
- * This means the port has a static configuration and no properties have
- * to be specified explicitly.
- * If any properties that identify the bus as parallel are found and
- * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
- * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
- * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
- * The caller should hold a reference to @node.
- */
-void v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- struct device_node *port_node = of_get_parent(node);
-
- memset(endpoint, 0, offsetof(struct v4l2_of_endpoint, head));
-
- endpoint->local_node = node;
- /*
- * It doesn't matter whether the two calls below succeed.
- * If they don't then the default value 0 is used.
- */
- of_property_read_u32(port_node, "reg", &endpoint->port);
- of_property_read_u32(node, "reg", &endpoint->id);
-
- v4l2_of_parse_csi_bus(node, endpoint);
- /*
- * Parse the parallel video bus properties only if none
- * of the MIPI CSI-2 specific properties were found.
- */
- if (endpoint->bus.mipi_csi2.flags == 0)
- v4l2_of_parse_parallel_bus(node, endpoint);
-
- of_node_put(port_node);
-}
-EXPORT_SYMBOL(v4l2_of_parse_endpoint);
-
-/**
- * v4l2_of_get_next_endpoint() - get next endpoint node
- * @parent: pointer to the parent device node
- * @prev: previous endpoint node, or NULL to get first
- *
- * Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is not decremented, the caller have to use
- * of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *endpoint;
- struct device_node *port = NULL;
-
- if (!parent)
- return NULL;
-
- if (!prev) {
- struct device_node *node;
- /*
- * It's the first call, we have to find a port subnode
- * within this node or within an optional 'ports' node.
- */
- node = of_get_child_by_name(parent, "ports");
- if (node)
- parent = node;
-
- for_each_child_of_node(parent, node) {
- if (!of_node_cmp(node->name, "port")) {
- port = node;
- break;
- }
- }
- if (port) {
- /* Found a port, get an endpoint. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- } else {
- endpoint = NULL;
- }
-
- if (!endpoint)
- pr_err("%s(): no endpoint nodes specified for %s\n",
- __func__, parent->full_name);
- } else {
- port = of_get_parent(prev);
- if (!port)
- /* Hm, has someone given us the root node ?... */
- return NULL;
-
- /* Avoid dropping prev node refcount to 0. */
- of_node_get(prev);
- endpoint = of_get_next_child(port, prev);
- if (endpoint) {
- of_node_put(port);
- return endpoint;
- }
-
- /* No more endpoints under this port, try the next one. */
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
-
- /* Pick up the first endpoint in this port. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- }
-
- return endpoint;
-}
-EXPORT_SYMBOL(v4l2_of_get_next_endpoint);
-
-/**
- * v4l2_of_get_remote_port_parent() - get remote port's parent node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port_parent(
- const struct device_node *node)
-{
- struct device_node *np;
- unsigned int depth;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- /* Walk 3 levels up only if there is 'ports' node. */
- for (depth = 3; depth && np; depth--) {
- np = of_get_next_parent(np);
- if (depth == 2 && of_node_cmp(np->name, "ports"))
- break;
- }
- return np;
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port_parent);
-
-/**
- * v4l2_of_get_remote_port() - get remote port node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote port node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port(const struct device_node *node)
-{
- struct device_node *np;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np)
- return NULL;
- return of_get_parent(np);
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port);
diff --git a/drivers/media/v4l2-core/v4l2-spi.c b/drivers/media/v4l2-core/v4l2-spi.c
new file mode 100644
index 000000000000..1baf8e63f19e
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-spi.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-spi - SPI helpers for Video4Linux2
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi && !spi->dev.of_node && !spi->dev.fwnode)
+ spi_unregister_device(spi);
+}
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ sd->dev = &spi->dev;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ snprintf(sd->name, sizeof(sd->name), "%s %s",
+ spi->dev.driver->name, dev_name(&spi->dev));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_controller *ctlr,
+ struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ if (!v4l2_dev)
+ return NULL;
+ if (info->modalias[0])
+ request_module(info->modalias);
+
+ spi = spi_new_device(ctlr, info);
+
+ if (!spi || !spi->dev.driver)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /*
+ * If we have a client but no subdev, then something went wrong and
+ * we must unregister the client.
+ */
+ if (!sd)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
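
A minimal sketch of an SPI sub-device driver probe using v4l2_spi_subdev_init() above; the example_subdev_ops table and the devm allocation are assumptions for illustration:

static int example_spi_probe(struct spi_device *spi)
{
	struct v4l2_subdev *sd;

	sd = devm_kzalloc(&spi->dev, sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;

	/* Links sd and spi together and fills in sd->name/owner/dev */
	v4l2_spi_subdev_init(sd, spi, &example_subdev_ops);

	return 0;
}
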
diff --git a/drivers/media/v4l2-core/v4l2-subdev-priv.h b/drivers/media/v4l2-core/v4l2-subdev-priv.h
new file mode 100644
index 000000000000..52391d6d8ab7
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-subdev-priv.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 sub-device private header.
+ *
+ * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _V4L2_SUBDEV_PRIV_H_
+#define _V4L2_SUBDEV_PRIV_H_
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd);
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd);
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 996c248dea42..25e66bf18f5f 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* V4L2 sub-device
*
@@ -5,49 +6,90 @@
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/export.h>
#include <linux/ioctl.h>
+#include <linux/leds.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/types.h>
+#include <linux/version.h>
#include <linux/videodev2.h>
-#include <linux/export.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+
+/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ *
+ * This structure stores configuration for a stream.
+ */
+struct v4l2_subdev_stream_config {
+ u32 pad;
+ u32 stream;
+ bool enabled;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
-static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
-{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
- if (fh->pad == NULL)
- return -ENOMEM;
+/*
+ * The Streams API is an experimental feature. To use the Streams API, set
+ * 'v4l2_subdev_enable_streams_api' to 1 below.
+ */
+
+static bool v4l2_subdev_enable_streams_api;
#endif
+
+/*
+ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
+ * of streams.
+ *
+ * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
+ * restricts the total number of streams in a pad, although the stream ID is
+ * not restricted.
+ */
+#define V4L2_SUBDEV_MAX_STREAM_ID 63
+
+#include "v4l2-subdev-priv.h"
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
+{
+ struct v4l2_subdev_state *state;
+ static struct lock_class_key key;
+
+ state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ fh->state = state;
+
return 0;
}
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- kfree(fh->pad);
- fh->pad = NULL;
-#endif
+ __v4l2_subdev_state_free(fh->state);
+ fh->state = NULL;
}
static int subdev_open(struct file *file)
@@ -55,9 +97,6 @@ static int subdev_open(struct file *file)
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
struct v4l2_subdev_fh *subdev_fh;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = NULL;
-#endif
int ret;
subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
@@ -71,17 +110,18 @@ static int subdev_open(struct file *file)
}
v4l2_fh_init(&subdev_fh->vfh, vdev);
- v4l2_fh_add(&subdev_fh->vfh);
- file->private_data = &subdev_fh->vfh;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (sd->v4l2_dev->mdev) {
- entity = media_entity_get(&sd->entity);
- if (!entity) {
+ v4l2_fh_add(&subdev_fh->vfh, file);
+
+ if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
+ struct module *owner;
+
+ owner = sd->entity.graph_obj.mdev->dev->driver->owner;
+ if (!try_module_get(owner)) {
ret = -EBUSY;
goto err;
}
+ subdev_fh->owner = owner;
}
-#endif
if (sd->internal_ops && sd->internal_ops->open) {
ret = sd->internal_ops->open(sd, subdev_fh);
@@ -92,11 +132,8 @@ static int subdev_open(struct file *file)
return 0;
err:
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (entity)
- media_entity_put(entity);
-#endif
- v4l2_fh_del(&subdev_fh->vfh);
+ module_put(subdev_fh->owner);
+ v4l2_fh_del(&subdev_fh->vfh, file);
v4l2_fh_exit(&subdev_fh->vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
@@ -108,54 +145,573 @@ static int subdev_close(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
if (sd->internal_ops && sd->internal_ops->close)
sd->internal_ops->close(sd, subdev_fh);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (sd->v4l2_dev->mdev)
- media_entity_put(&sd->entity);
-#endif
- v4l2_fh_del(vfh);
+ module_put(subdev_fh->owner);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
- file->private_data = NULL;
return 0;
}
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static int subdev_open(struct file *file)
+{
+ return -ENODEV;
+}
+
+static int subdev_close(struct file *file)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led))
+ led_set_brightness(sd->privacy_led,
+ sd->privacy_led->max_brightness);
+#endif
+}
+
+static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led))
+ led_set_brightness(sd->privacy_led, 0);
+#endif
+}
+
+static inline int check_which(u32 which)
+{
+ if (which != V4L2_SUBDEV_FORMAT_TRY &&
+ which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (sd->entity.num_pads) {
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+ return 0;
+ }
+#endif
+ /* allow pad 0 on subdevices not registered as media entities */
+ if (pad > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ u32 which, u32 pad, u32 stream)
+{
+ if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ if (!v4l2_subdev_state_get_format(state, pad, stream))
+ return -EINVAL;
+ return 0;
+#else
+ return -EINVAL;
+#endif
+ }
+
+ if (stream != 0)
+ return -EINVAL;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int check_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (!format)
+ return -EINVAL;
+
+ return check_which(format->which) ? : check_pad(sd, format->pad) ? :
+ check_state(sd, state, format->which, format->pad, format->stream);
+}
+
+static int call_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->get_fmt(sd, state, format);
+}
-static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int call_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->set_fmt(sd, state, format);
+}
+
+static int call_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!code)
+ return -EINVAL;
+
+ return check_which(code->which) ? : check_pad(sd, code->pad) ? :
+ check_state(sd, state, code->which, code->pad, code->stream) ? :
+ sd->ops->pad->enum_mbus_code(sd, state, code);
+}
+
+static int call_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (!fse)
+ return -EINVAL;
+
+ return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
+ check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
+ sd->ops->pad->enum_frame_size(sd, state, fse);
+}
+
+static int call_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ if (!fie)
+ return -EINVAL;
+
+ return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
+ check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
+ sd->ops->pad->enum_frame_interval(sd, state, fie);
+}
+
+static inline int check_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ if (!sel)
+ return -EINVAL;
+
+ return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
+ check_state(sd, state, sel->which, sel->pad, sel->stream);
+}
+
+static int call_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->get_selection(sd, state, sel);
+}
+
+static int call_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->set_selection(sd, state, sel);
+}
+
+static inline int check_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ if (!fi)
+ return -EINVAL;
+
+ return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
+ check_state(sd, state, fi->which, fi->pad, fi->stream);
+}
+
+static int call_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, state, fi) ? :
+ sd->ops->pad->get_frame_interval(sd, state, fi);
+}
+
+static int call_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, state, fi) ? :
+ sd->ops->pad->set_frame_interval(sd, state, fi);
+}
+
+static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ unsigned int i;
+ int ret;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ return -EOPNOTSUPP;
+#endif
+
+ memset(fd, 0, sizeof(*fd));
+
+ ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
+ if (ret)
+ return ret;
+
+ dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
+ fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
+ fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
+ "unknown");
+
+ for (i = 0; i < fd->num_entries; i++) {
+ struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
+ char buf[20] = "";
+
+ if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+ WARN_ON(snprintf(buf, sizeof(buf),
+ ", vc %u, dt 0x%02x",
+ entry->bus.csi2.vc,
+ entry->bus.csi2.dt) >= sizeof(buf));
+
+ dev_dbg(sd->dev,
+ "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
+ entry->stream, entry->pixelcode, entry->length,
+ entry->flags, buf);
+ }
+
+ return 0;
+}
+
+static inline int check_edid(struct v4l2_subdev *sd,
+ struct v4l2_subdev_edid *edid)
+{
+ if (!edid)
+ return -EINVAL;
+
+ if (edid->blocks && edid->edid == NULL)
+ return -EINVAL;
+
+ return check_pad(sd, edid->pad);
+}
+
+static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
+}
+
+static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
+}
+
+static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->s_dv_timings(sd, pad, timings);
+}
+
+static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->g_dv_timings(sd, pad, timings);
+}
+
+static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ if (!timings)
+ return -EINVAL;
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->query_dv_timings(sd, pad, timings);
+}
+
+static int call_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (!cap)
+ return -EINVAL;
+
+ return check_pad(sd, cap->pad) ? :
+ sd->ops->pad->dv_timings_cap(sd, cap);
+}
+
+static int call_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *dvt)
+{
+ if (!dvt)
+ return -EINVAL;
+
+ return check_pad(sd, dvt->pad) ? :
+ sd->ops->pad->enum_dv_timings(sd, dvt);
+}
+
+static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->get_mbus_config(sd, pad, config);
+}
+
+static int call_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret;
+
+ /*
+ * The .s_stream() operation must never be called to start or stop an
+ * already started or stopped subdev. Catch offenders but don't return
+ * an error yet to avoid regressions.
+ */
+ if (WARN_ON(sd->s_stream_enabled == !!enable))
+ return 0;
+
+ ret = sd->ops->video->s_stream(sd, enable);
+
+ if (!enable && ret < 0) {
+ dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
+ ret = 0;
+ }
+
+ if (!ret) {
+ sd->s_stream_enabled = enable;
+
+ if (enable)
+ v4l2_subdev_enable_privacy_led(sd);
+ else
+ v4l2_subdev_disable_privacy_led(sd);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+/*
+ * Create state-management wrapper for pad ops dealing with subdev state. The
+ * wrapper handles the case where the caller does not provide the called
+ * subdev's state. This should be removed when all the callers are fixed.
+ */
+#define DEFINE_STATE_WRAPPER(f, arg_type) \
+ static int call_##f##_state(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_state *_state, \
+ arg_type *arg) \
+ { \
+ struct v4l2_subdev_state *state = _state; \
+ int ret; \
+ if (!_state) \
+ state = v4l2_subdev_lock_and_get_active_state(sd); \
+ ret = call_##f(sd, state, arg); \
+ if (!_state && state) \
+ v4l2_subdev_unlock_state(state); \
+ return ret; \
+ }
+
+#else /* CONFIG_MEDIA_CONTROLLER */
+
+#define DEFINE_STATE_WRAPPER(f, arg_type) \
+ static int call_##f##_state(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_state *state, \
+ arg_type *arg) \
+ { \
+ return call_##f(sd, state, arg); \
+ }
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
+DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
+DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
+DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
+DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
+DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
+DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
+DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
+
+static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
+ .get_fmt = call_get_fmt_state,
+ .set_fmt = call_set_fmt_state,
+ .enum_mbus_code = call_enum_mbus_code_state,
+ .enum_frame_size = call_enum_frame_size_state,
+ .enum_frame_interval = call_enum_frame_interval_state,
+ .get_selection = call_get_selection_state,
+ .set_selection = call_set_selection_state,
+ .get_frame_interval = call_get_frame_interval,
+ .set_frame_interval = call_set_frame_interval,
+ .get_edid = call_get_edid,
+ .set_edid = call_set_edid,
+ .s_dv_timings = call_s_dv_timings,
+ .g_dv_timings = call_g_dv_timings,
+ .query_dv_timings = call_query_dv_timings,
+ .dv_timings_cap = call_dv_timings_cap,
+ .enum_dv_timings = call_enum_dv_timings,
+ .get_frame_desc = call_get_frame_desc,
+ .get_mbus_config = call_get_mbus_config,
+};
+
+static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
+ .s_stream = call_s_stream,
+};
+
+const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
+ .pad = &v4l2_subdev_call_pad_wrappers,
+ .video = &v4l2_subdev_call_video_wrappers,
+};
+EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static struct v4l2_subdev_state *
+subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
+ unsigned int cmd, void *arg)
+{
+ u32 which;
+
+ switch (cmd) {
+ default:
+ return NULL;
+ case VIDIOC_SUBDEV_G_FMT:
+ case VIDIOC_SUBDEV_S_FMT:
+ which = ((struct v4l2_subdev_format *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_CROP:
+ case VIDIOC_SUBDEV_S_CROP:
+ which = ((struct v4l2_subdev_crop *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
+ which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
+ which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
+ which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_SELECTION:
+ case VIDIOC_SUBDEV_S_SELECTION:
+ which = ((struct v4l2_subdev_selection *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (!(subdev_fh->client_caps &
+ V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
+ fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ which = fi->which;
+ break;
+ }
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING:
+ which = ((struct v4l2_subdev_routing *)arg)->which;
+ break;
+ }
+
+ return which == V4L2_SUBDEV_FORMAT_TRY ?
+ subdev_fh->state :
+ v4l2_subdev_get_unlocked_active_state(sd);
+}
+
+static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ struct v4l2_subdev_state *state)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
-#endif
+ bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
+ bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
+ bool client_supports_streams = subdev_fh->client_caps &
+ V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+ int rval;
+
+ /*
+ * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
+ * Remove this when the API is no longer experimental.
+ */
+ if (!v4l2_subdev_enable_streams_api)
+ streams_subdev = false;
switch (cmd) {
+ case VIDIOC_SUBDEV_QUERYCAP: {
+ struct v4l2_subdev_capability *cap = arg;
+
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+ cap->version = LINUX_VERSION_CODE;
+ cap->capabilities =
+ (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
+ (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);
+
+ return 0;
+ }
+
case VIDIOC_QUERYCTRL:
+ /*
+ * TODO: this really should be folded into v4l2_queryctrl (this
+ * currently returns -EINVAL for NULL control handlers).
+ * However, v4l2_queryctrl() is still called directly by
+ * drivers as well and until that has been addressed I believe
+ * it is safer to do the check here. The same is true for the
+ * other control ioctls below.
+ */
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
return v4l2_queryctrl(vfh->ctrl_handler, arg);
+ case VIDIOC_QUERY_EXT_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
+
case VIDIOC_QUERYMENU:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
return v4l2_querymenu(vfh->ctrl_handler, arg);
case VIDIOC_G_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
case VIDIOC_G_EXT_CTRLS:
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
@@ -164,10 +720,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
case VIDIOC_SUBSCRIBE_EVENT:
- return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+ if (v4l2_subdev_has_op(sd, core, subscribe_event))
+ return v4l2_subdev_call(sd, core, subscribe_event,
+ vfh, arg);
+
+ if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
+ vfh->ctrl_handler)
+ return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);
+
+ return -ENOIOCTLCMD;
case VIDIOC_UNSUBSCRIBE_EVENT:
- return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+ if (v4l2_subdev_has_op(sd, core, unsubscribe_event))
+ return v4l2_subdev_call(sd, core, unsubscribe_event,
+ vfh, arg);
+
+ if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)
+ return v4l2_event_subdev_unsubscribe(sd, vfh, arg);
+
+ return -ENOIOCTLCMD;
#ifdef CONFIG_VIDEO_ADV_DEBUG
case VIDIOC_DBG_G_REGISTER:
@@ -186,6 +757,19 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return -EPERM;
return v4l2_subdev_call(sd, core, s_register, p);
}
+ case VIDIOC_DBG_G_CHIP_INFO:
+ {
+ struct v4l2_dbg_chip_info *p = arg;
+
+ if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
+ return -EINVAL;
+ if (sd->ops->core && sd->ops->core->s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (sd->ops->core && sd->ops->core->g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strscpy(p->name, sd->name, sizeof(p->name));
+ return 0;
+ }
#endif
case VIDIOC_LOG_STATUS: {
@@ -199,56 +783,47 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return ret;
}
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
- if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
- format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
+ if (!client_supports_streams)
+ format->stream = 0;
- if (format->pad >= sd->entity.num_pads)
- return -EINVAL;
-
- return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format);
+ memset(format->reserved, 0, sizeof(format->reserved));
+ memset(format->format.reserved, 0, sizeof(format->format.reserved));
+ return v4l2_subdev_call(sd, pad, get_fmt, state, format);
}
case VIDIOC_SUBDEV_S_FMT: {
struct v4l2_subdev_format *format = arg;
- if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
- format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
+ if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
- if (format->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (!client_supports_streams)
+ format->stream = 0;
- return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format);
+ memset(format->reserved, 0, sizeof(format->reserved));
+ memset(format->format.reserved, 0, sizeof(format->format.reserved));
+ return v4l2_subdev_call(sd, pad, set_fmt, state, format);
}
case VIDIOC_SUBDEV_G_CROP: {
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
- int rval;
- if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
- crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (crop->pad >= sd->entity.num_pads)
- return -EINVAL;
-
- rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
- if (rval != -ENOIOCTLCMD)
- return rval;
+ if (!client_supports_streams)
+ crop->stream = 0;
+ memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh, &sel);
+ sd, pad, get_selection, state, &sel);
crop->rect = sel.r;
@@ -258,27 +833,23 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_CROP: {
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
- int rval;
- if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
- crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (crop->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
- rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
- if (rval != -ENOIOCTLCMD)
- return rval;
+ if (!client_supports_streams)
+ crop->stream = 0;
+ memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = crop->rect;
rval = v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh, &sel);
+ sd, pad, set_selection, state, &sel);
crop->rect = sel.r;
@@ -288,73 +859,286 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
struct v4l2_subdev_mbus_code_enum *code = arg;
- if (code->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (!client_supports_streams)
+ code->stream = 0;
- return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
+ memset(code->reserved, 0, sizeof(code->reserved));
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
code);
}
case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
struct v4l2_subdev_frame_size_enum *fse = arg;
- if (fse->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (!client_supports_streams)
+ fse->stream = 0;
- return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
+ memset(fse->reserved, 0, sizeof(fse->reserved));
+ return v4l2_subdev_call(sd, pad, enum_frame_size, state,
fse);
}
- case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
- return v4l2_subdev_call(sd, video, g_frame_interval, arg);
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (!client_supports_streams)
+ fi->stream = 0;
- case VIDIOC_SUBDEV_S_FRAME_INTERVAL:
- return v4l2_subdev_call(sd, video, s_frame_interval, arg);
+ memset(fi->reserved, 0, sizeof(fi->reserved));
+ return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
+ }
+
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (!client_supports_streams)
+ fi->stream = 0;
+
+ if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ memset(fi->reserved, 0, sizeof(fi->reserved));
+ return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
+ }
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval_enum *fie = arg;
- if (fie->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (!client_supports_streams)
+ fie->stream = 0;
- return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
+ memset(fie->reserved, 0, sizeof(fie->reserved));
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
fie);
}
case VIDIOC_SUBDEV_G_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
- if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
- sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (sel->pad >= sd->entity.num_pads)
- return -EINVAL;
+ if (!client_supports_streams)
+ sel->stream = 0;
+ memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh, sel);
+ sd, pad, get_selection, state, sel);
}
case VIDIOC_SUBDEV_S_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
- if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
- sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ if (!client_supports_streams)
+ sel->stream = 0;
+
+ memset(sel->reserved, 0, sizeof(sel->reserved));
+ return v4l2_subdev_call(
+ sd, pad, set_selection, state, sel);
+ }
+
+ case VIDIOC_G_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ return v4l2_subdev_call(sd, pad, get_edid, edid);
+ }
+
+ case VIDIOC_S_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ return v4l2_subdev_call(sd, pad, set_edid, edid);
+ }
+
+ case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
+ struct v4l2_dv_timings_cap *cap = arg;
+
+ return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
+ struct v4l2_enum_dv_timings *dvt = arg;
+
+ return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
+ }
+
+ case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
+ return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);
+
+ case VIDIOC_SUBDEV_G_DV_TIMINGS:
+ return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);
+
+ case VIDIOC_SUBDEV_S_DV_TIMINGS:
+ if (ro_subdev)
+ return -EPERM;
+
+ return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);
+
+ case VIDIOC_SUBDEV_G_STD:
+ return v4l2_subdev_call(sd, video, g_std, arg);
+
+ case VIDIOC_SUBDEV_S_STD: {
+ v4l2_std_id *std = arg;
+
+ if (ro_subdev)
+ return -EPERM;
+
+ return v4l2_subdev_call(sd, video, s_std, *std);
+ }
+
+ case VIDIOC_SUBDEV_ENUMSTD: {
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id;
+
+ if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
return -EINVAL;
- if (sel->pad >= sd->entity.num_pads)
+ return v4l_video_std_enumstd(p, id);
+ }
+
+ case VIDIOC_SUBDEV_QUERYSTD:
+ return v4l2_subdev_call(sd, video, querystd, arg);
+
+ case VIDIOC_SUBDEV_G_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_krouting *krouting;
+
+ if (!v4l2_subdev_enable_streams_api)
+ return -ENOIOCTLCMD;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
+ return -ENOIOCTLCMD;
+
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ krouting = &state->routing;
+
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ krouting->routes,
+ min(krouting->num_routes, routing->len_routes) *
+ sizeof(*krouting->routes));
+ routing->num_routes = krouting->num_routes;
+
+ return 0;
+ }
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_route *routes =
+ (struct v4l2_subdev_route *)(uintptr_t)routing->routes;
+ struct v4l2_subdev_krouting krouting = {};
+ unsigned int num_active_routes = 0;
+ unsigned int i;
+
+ if (!v4l2_subdev_enable_streams_api)
+ return -ENOIOCTLCMD;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
+ return -ENOIOCTLCMD;
+
+ if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ if (routing->num_routes > routing->len_routes)
return -EINVAL;
- return v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh, sel);
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routes[i];
+ const struct media_pad *pads = sd->entity.pads;
+
+ if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
+ route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
+ return -EINVAL;
+
+ if (route->sink_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->sink_pad].flags &
+ MEDIA_PAD_FL_SINK))
+ return -EINVAL;
+
+ if (route->source_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->source_pad].flags &
+ MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+
+ if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
+ num_active_routes++;
+ }
+
+ /*
+ * Drivers that implement routing need to report a frame
+ * descriptor accordingly, with up to one entry per route. Until
+		 * the frame descriptor entries are allocated dynamically,
+ * limit the number of active routes to
+ * V4L2_FRAME_DESC_ENTRY_MAX.
+ */
+ if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -E2BIG;
+
+ /*
+ * If the driver doesn't support setting routing, just return
+ * the routing table.
+ */
+ if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ state->routing.routes,
+ min(state->routing.num_routes, routing->len_routes) *
+ sizeof(*state->routing.routes));
+ routing->num_routes = state->routing.num_routes;
+
+ return 0;
+ }
+
+ krouting.num_routes = routing->num_routes;
+ krouting.len_routes = routing->len_routes;
+ krouting.routes = routes;
+
+ rval = v4l2_subdev_call(sd, pad, set_routing, state,
+ routing->which, &krouting);
+ if (rval < 0)
+ return rval;
+
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ state->routing.routes,
+ min(state->routing.num_routes, routing->len_routes) *
+ sizeof(*state->routing.routes));
+ routing->num_routes = state->routing.num_routes;
+
+ return 0;
}
- case VIDIOC_SUBDEV_G_EDID:
- return v4l2_subdev_call(sd, pad, get_edid, arg);
+ case VIDIOC_SUBDEV_G_CLIENT_CAP: {
+ struct v4l2_subdev_client_capability *client_cap = arg;
+
+ client_cap->capabilities = subdev_fh->client_caps;
+
+ return 0;
+ }
+
+ case VIDIOC_SUBDEV_S_CLIENT_CAP: {
+ struct v4l2_subdev_client_capability *client_cap = arg;
+
+ /*
+ * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
+ * enabled. Remove this when streams API is no longer
+ * experimental.
+ */
+ if (!v4l2_subdev_enable_streams_api)
+ client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ /* Filter out unsupported capabilities */
+ client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
+ V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);
+
+ subdev_fh->client_caps = client_cap->capabilities;
+
+ return 0;
+ }
- case VIDIOC_SUBDEV_S_EDID:
- return v4l2_subdev_call(sd, pad, set_edid, arg);
-#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
@@ -362,25 +1146,83 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return 0;
}
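/*
 * Usage sketch (illustrative, not part of this patch): a userspace client
 * that wants the .stream fields of the subdev ioctls above to be honoured
 * must first opt in with VIDIOC_SUBDEV_S_CLIENT_CAP, otherwise the handler
 * silently clears the stream number to 0. The device node path below is a
 * hypothetical example.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

int open_subdev_with_streams(void)
{
	struct v4l2_subdev_capability cap = {};
	struct v4l2_subdev_client_capability ccap = {};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return -1;

	/* Check whether this subdev exposes the streams API at all. */
	if (ioctl(fd, VIDIOC_SUBDEV_QUERYCAP, &cap) == 0 &&
	    (cap.capabilities & V4L2_SUBDEV_CAP_STREAMS)) {
		ccap.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
		/* Unsupported bits are filtered out by the kernel. */
		ioctl(fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &ccap);
	}

	return fd;
}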
+static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->lock;
+ long ret = -ENODEV;
+
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+
+ if (video_is_registered(vdev)) {
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+ struct v4l2_subdev_state *state;
+
+ state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
+
+ if (state)
+ v4l2_subdev_lock_state(state);
+
+ ret = subdev_do_ioctl(file, cmd, arg, state);
+
+ if (state)
+ v4l2_subdev_unlock_state(state);
+ }
+
+ if (lock)
+ mutex_unlock(lock);
+ return ret;
+}
+
static long subdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- return video_usercopy(file, cmd, arg, subdev_do_ioctl);
+ return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}
-static unsigned int subdev_poll(struct file *file, poll_table *wait)
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static long subdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+#endif
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
- return POLLERR;
+ return EPOLLERR;
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
- return POLLPRI;
+ return EPOLLPRI;
return 0;
}
@@ -389,72 +1231,1324 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.owner = THIS_MODULE,
.open = subdev_open,
.unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = subdev_compat_ioctl32,
+#endif
.release = subdev_close,
.poll = subdev_poll,
};
#ifdef CONFIG_MEDIA_CONTROLLER
+
+int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct fwnode_handle *fwnode;
+ struct v4l2_subdev *sd;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
+ fwnode_handle_put(fwnode);
+
+ if (device_match_fwnode(sd->dev, fwnode))
+ return endpoint->port;
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
+
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct media_link *link,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
- if (source_fmt->format.width != sink_fmt->format.width
- || source_fmt->format.height != sink_fmt->format.height
- || source_fmt->format.code != sink_fmt->format.code)
+ bool pass = true;
+
+ /* The width, height and code must match. */
+ if (source_fmt->format.width != sink_fmt->format.width) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: width does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.width, sink_fmt->format.width);
+ pass = false;
+ }
+
+ if (source_fmt->format.height != sink_fmt->format.height) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: height does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.height, sink_fmt->format.height);
+ pass = false;
+ }
+
+ if (source_fmt->format.code != sink_fmt->format.code) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
+ __func__,
+ source_fmt->format.code, sink_fmt->format.code);
+ pass = false;
+ }
+
+ /* The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt->format.field != sink_fmt->format.field &&
+ sink_fmt->format.field != V4L2_FIELD_NONE) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: field does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.field, sink_fmt->format.field);
+ pass = false;
+ }
+
+ if (pass)
+ return 0;
+
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ return -EPIPE;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
+
+static int
+v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
+ struct v4l2_subdev_format *fmt,
+ bool states_locked)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ fmt->stream = stream;
+
+ if (states_locked)
+ state = v4l2_subdev_get_locked_active_state(sd);
+ else
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
+
+ if (!states_locked && state)
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static void __v4l2_link_validate_get_streams(struct media_pad *pad,
+ u64 *streams_mask,
+ bool states_locked)
+{
+ struct v4l2_subdev_route *route;
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev *subdev;
+
+ subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ *streams_mask = 0;
+
+ if (states_locked)
+ state = v4l2_subdev_get_locked_active_state(subdev);
+ else
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
+
+ if (WARN_ON(!state))
+ return;
+
+ for_each_active_route(&state->routing, route) {
+ u32 route_pad;
+ u32 route_stream;
+
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ route_pad = route->source_pad;
+ route_stream = route->source_stream;
+ } else {
+ route_pad = route->sink_pad;
+ route_stream = route->sink_stream;
+ }
+
+ if (route_pad != pad->index)
+ continue;
+
+ *streams_mask |= BIT_ULL(route_stream);
+ }
+
+ if (!states_locked)
+ v4l2_subdev_unlock_state(state);
+}
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static void v4l2_link_validate_get_streams(struct media_pad *pad,
+ u64 *streams_mask,
+ bool states_locked)
+{
+ struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
+ /* Non-streams subdevs have an implicit stream 0 */
+ *streams_mask = BIT_ULL(0);
+ return;
+ }
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ __v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
+#else
+ /* This shouldn't happen */
+ *streams_mask = 0;
+#endif
+}
+
+static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
+{
+ struct v4l2_subdev *sink_subdev =
+ media_entity_to_v4l2_subdev(link->sink->entity);
+ struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
+ u64 source_streams_mask;
+ u64 sink_streams_mask;
+ u64 dangling_sink_streams;
+ u32 stream;
+ int ret;
+
+ dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
+ v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);
+
+ /*
+ * It is ok to have more source streams than sink streams as extra
+ * source streams can just be ignored by the receiver, but having extra
+ * sink streams is an error as streams must have a source.
+ */
+ dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
+ sink_streams_mask;
+ if (dangling_sink_streams) {
+ dev_err(dev, "Dangling sink streams: mask %#llx\n",
+ dangling_sink_streams);
return -EINVAL;
+ }
+
+ /* Validate source and sink stream formats */
+
+ for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+
+ if (!(sink_streams_mask & BIT_ULL(stream)))
+ continue;
+
+ dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
+ link->source->entity->name, link->source->index, stream,
+ link->sink->entity->name, link->sink->index, stream);
+
+ ret = v4l2_subdev_link_validate_get_format(link->source, stream,
+ &source_fmt, states_locked);
+ if (ret < 0) {
+ dev_dbg(dev,
+ "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->source->entity->name, link->source->index,
+ stream);
+ continue;
+ }
+
+ ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
+ &sink_fmt, states_locked);
+ if (ret < 0) {
+ dev_dbg(dev,
+ "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->sink->entity->name, link->sink->index,
+ stream);
+ continue;
+ }
+
+ /* TODO: add stream number to link_validate() */
+ ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (!ret)
+ continue;
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = v4l2_subdev_link_validate_default(sink_subdev, link,
+ &source_fmt, &sink_fmt);
+
+ if (ret)
+ return ret;
+ }
return 0;
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *source_sd, *sink_sd;
+ struct v4l2_subdev_state *source_state, *sink_state;
+ bool states_locked;
+ int ret;
+
+ /*
+ * Links are validated in the context of the sink entity. Usage of this
+ * helper on a sink that is not a subdev is a clear driver bug.
+ */
+ if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
+ return -EINVAL;
+
+ /*
+ * If the source is a video device, delegate link validation to it. This
+	 * allows usage of this helper for subdevs connected to a video output
+	 * device, provided that the driver implements the video output device's
+ * .link_validate() operation.
+ */
+ if (is_media_entity_v4l2_video_device(link->source->entity)) {
+ struct media_entity *source = link->source->entity;
+
+ if (!source->ops || !source->ops->link_validate) {
+ /*
+ * Many existing drivers do not implement the required
+ * .link_validate() operation for their video devices.
+ * Print a warning to get the drivers fixed, and return
+ * 0 to avoid breaking userspace. This should
+			 * eventually be turned into a WARN_ON() once all
+			 * drivers have been fixed.
+ */
+ pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
+ source->name);
+ return 0;
+ }
+
+ /*
+ * Avoid infinite loops in case a video device incorrectly uses
+ * this helper function as its .link_validate() handler.
+ */
+ if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
+ return -EINVAL;
+
+ return source->ops->link_validate(link);
+ }
+
+ /*
+ * If the source is still not a subdev, usage of this helper is a clear
+ * driver bug.
+ */
+ if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
+ return -EINVAL;
+
+ sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
+ source_sd = media_entity_to_v4l2_subdev(link->source->entity);
+
+ sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
+ source_state = v4l2_subdev_get_unlocked_active_state(source_sd);
+
+ states_locked = sink_state && source_state;
+
+ if (states_locked)
+ v4l2_subdev_lock_states(sink_state, source_state);
+
+ ret = v4l2_subdev_link_validate_locked(link, states_locked);
+
+ if (states_locked)
+ v4l2_subdev_unlock_states(sink_state, source_state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+
+bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
+ unsigned int pad0, unsigned int pad1)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev_state *state;
+ unsigned int i;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ routing = &state->routing;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
+ (route->source_pad == pad0 && route->sink_pad == pad1)) {
+ v4l2_subdev_unlock_state(state);
+ return true;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
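+
/*
 * Usage sketch (illustrative, hypothetical driver): the two helpers above
 * are intended to be plugged into a subdev driver's media entity
 * operations, so the media core validates links and derives pad
 * interdependencies from the routing table.
 */
static const struct media_entity_operations foo_entity_ops = {
	.link_validate		= v4l2_subdev_link_validate,
	.has_pad_interdep	= v4l2_subdev_has_pad_interdep,
};
/* In the driver's probe(): sd->entity.ops = &foo_entity_ops; */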
+
+struct v4l2_subdev_state *
+__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
+ struct lock_class_key *lock_key)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ __mutex_init(&state->_lock, lock_name, lock_key);
+ if (sd->state_lock)
+ state->lock = sd->state_lock;
+ else
+ state->lock = &state->_lock;
+
+ state->sd = sd;
+
+ /* Drivers that support streams do not need the legacy pad config */
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
+ state->pads = kvcalloc(sd->entity.num_pads,
+ sizeof(*state->pads), GFP_KERNEL);
+ if (!state->pads) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (sd->internal_ops && sd->internal_ops->init_state) {
+ /*
+ * There can be no race at this point, but we lock the state
+ * anyway to satisfy lockdep checks.
+ */
+ v4l2_subdev_lock_state(state);
+ ret = sd->internal_ops->init_state(sd, state);
+ v4l2_subdev_unlock_state(state);
+
+ if (ret)
+ goto err;
+ }
+
+ return state;
+
+err:
+ if (state && state->pads)
+ kvfree(state->pads);
+
+ kfree(state);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
+
+void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
+{
+ if (!state)
+ return;
+
+ mutex_destroy(&state->_lock);
+
+ kfree(state->routing.routes);
+ kvfree(state->stream_configs.configs);
+ kvfree(state->pads);
+ kfree(state);
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
+
+int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
+ struct lock_class_key *key)
+{
+ struct v4l2_subdev_state *state;
+ struct device *dev = sd->dev;
+ bool has_disable_streams;
+ bool has_enable_streams;
+ bool has_s_stream;
+
+ /* Check that the subdevice implements the required features */
+
+ has_s_stream = v4l2_subdev_has_op(sd, video, s_stream);
+ has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams);
+ has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams);
+
+ if (has_enable_streams != has_disable_streams) {
+ dev_err(dev,
+ "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n",
+ sd->name);
+ return -EINVAL;
+ }
+
+ if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
+ if (has_s_stream && !has_enable_streams) {
+ dev_err(dev,
+ "subdev '%s' must implement .enable/disable_streams()\n",
+ sd->name);
+
+ return -EINVAL;
+ }
+ }
+
+ if (sd->ctrl_handler)
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ state = __v4l2_subdev_state_alloc(sd, name, key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ sd->active_state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
+
+void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
+
+ __v4l2_subdev_state_free(sd->active_state);
+ sd->active_state = NULL;
+
+ /* Uninitialised sub-device, bail out here. */
+ if (!sd->async_subdev_endpoint_list.next)
+ return;
+
+ list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ list_del(&ase->async_subdev_endpoint_entry);
+
+ kfree(ase);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
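+
/*
 * Usage sketch (illustrative, hypothetical driver): the active state is
 * allocated with v4l2_subdev_init_finalize() (a macro wrapping
 * __v4l2_subdev_init_finalize() with a per-driver lock class) once the
 * pads have been initialised, and released with v4l2_subdev_cleanup().
 * struct foo_dev is a hypothetical driver structure embedding the subdev.
 */
static int foo_probe(struct foo_dev *foo)
{
	struct v4l2_subdev *sd = &foo->sd;
	int ret;

	/* ... v4l2_subdev_init(), pad flags, media_entity_pads_init() ... */

	ret = v4l2_subdev_init_finalize(sd);
	if (ret)
		return ret;

	ret = v4l2_async_register_subdev(sd);
	if (ret)
		v4l2_subdev_cleanup(sd);

	return ret;
}

static void foo_remove(struct foo_dev *foo)
{
	v4l2_async_unregister_subdev(&foo->sd);
	v4l2_subdev_cleanup(&foo->sd);
}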
+
+struct v4l2_mbus_framefmt *
+__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].format;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);
+
+struct v4l2_rect *
+__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].crop;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].crop;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);
+
+struct v4l2_rect *
+__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].compose;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].compose;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);
+
+struct v4l2_fract *
+__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON(!state))
+ return NULL;
+
+ lockdep_assert_held(state->lock);
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].interval;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].interval;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int
-v4l2_subdev_link_validate_get_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
+v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
+ const struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_stream_configs new_configs = { 0 };
+ struct v4l2_subdev_route *route;
+ u32 idx;
+
+ /* Count number of formats needed */
+ for_each_active_route(routing, route) {
+ /*
+ * Each route needs a format on both ends of the route.
+ */
+ new_configs.num_configs += 2;
+ }
+
+ if (new_configs.num_configs) {
+ new_configs.configs = kvcalloc(new_configs.num_configs,
+ sizeof(*new_configs.configs),
+ GFP_KERNEL);
+
+ if (!new_configs.configs)
+ return -ENOMEM;
+ }
+
+ /*
+	 * Fill in the 'pad' and 'stream' values for each item in the array
+	 * from the routing table.
+ */
+ idx = 0;
+
+ for_each_active_route(routing, route) {
+ new_configs.configs[idx].pad = route->sink_pad;
+ new_configs.configs[idx].stream = route->sink_stream;
+
+ idx++;
+
+ new_configs.configs[idx].pad = route->source_pad;
+ new_configs.configs[idx].stream = route->source_stream;
+
+ idx++;
+ }
+
+ kvfree(stream_configs->configs);
+ *stream_configs = new_configs;
+
+ return 0;
+}
+
+int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ format->format = *fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
+
+int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_fract *interval;
+
+ interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
+ if (!interval)
+ return -EINVAL;
+
+ fi->interval = *interval;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);
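+
/*
 * Usage sketch (illustrative, hypothetical driver): drivers that keep all
 * format and frame interval data in the subdev state can use the two
 * helpers above directly as their "get" pad operations and only implement
 * the "set" side themselves.
 */
static const struct v4l2_subdev_pad_ops foo_pad_ops = {
	.get_fmt		= v4l2_subdev_get_fmt,
	.get_frame_interval	= v4l2_subdev_get_frame_interval,
	/* .set_fmt and .set_frame_interval remain driver specific. */
};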
+
+int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_krouting *dst = &state->routing;
+ const struct v4l2_subdev_krouting *src = routing;
+ struct v4l2_subdev_krouting new_routing = { 0 };
+ size_t bytes;
+ int r;
+
+ if (unlikely(check_mul_overflow((size_t)src->num_routes,
+ sizeof(*src->routes), &bytes)))
+ return -EOVERFLOW;
+
+ lockdep_assert_held(state->lock);
+
+ if (src->num_routes > 0) {
+ new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
+ if (!new_routing.routes)
+ return -ENOMEM;
+ }
+
+ new_routing.num_routes = src->num_routes;
+
+ r = v4l2_subdev_init_stream_configs(&state->stream_configs,
+ &new_routing);
+ if (r) {
+ kfree(new_routing.routes);
+ return r;
+ }
+
+ kfree(dst->routes);
+ *dst = new_routing;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
+
+struct v4l2_subdev_route *
+__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
+ struct v4l2_subdev_route *route)
{
- if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
- struct v4l2_subdev *sd =
- media_entity_to_v4l2_subdev(pad->entity);
+ if (route)
+ ++route;
+ else
+ route = &routing->routes[0];
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
- return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ for (; route < routing->routes + routing->num_routes; ++route) {
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ return route;
}
- WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
- "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
- pad->entity->type, pad->entity->name);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
+
+int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing,
+ const struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+ int ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, routing);
+ if (ret)
+ return ret;
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i)
+ stream_configs->configs[i].fmt = *fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
+
+int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
+ u32 pad, u32 stream, u32 *other_pad,
+ u32 *other_stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (route->source_pad == pad &&
+ route->source_stream == stream) {
+ if (other_pad)
+ *other_pad = route->sink_pad;
+ if (other_stream)
+ *other_stream = route->sink_stream;
+ return 0;
+ }
+
+ if (route->sink_pad == pad && route->sink_stream == stream) {
+ if (other_pad)
+ *other_pad = route->source_pad;
+ if (other_stream)
+ *other_stream = route->source_stream;
+ return 0;
+ }
+ }
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
-int v4l2_subdev_link_validate(struct media_link *link)
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
+ u32 pad, u32 stream)
{
- struct v4l2_subdev *sink;
- struct v4l2_subdev_format sink_fmt, source_fmt;
- int rval;
+ u32 other_pad, other_stream;
+ int ret;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ pad, stream,
+ &other_pad, &other_stream);
+ if (ret)
+ return NULL;
+
+ return v4l2_subdev_state_get_format(state, other_pad, other_stream);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
+
+u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
+ u32 pad0, u32 pad1, u64 *streams)
+{
+ const struct v4l2_subdev_krouting *routing = &state->routing;
+ struct v4l2_subdev_route *route;
+ u64 streams0 = 0;
+ u64 streams1 = 0;
+
+ for_each_active_route(routing, route) {
+ if (route->sink_pad == pad0 && route->source_pad == pad1 &&
+ (*streams & BIT_ULL(route->sink_stream))) {
+ streams0 |= BIT_ULL(route->sink_stream);
+ streams1 |= BIT_ULL(route->source_stream);
+ }
+ if (route->source_pad == pad0 && route->sink_pad == pad1 &&
+ (*streams & BIT_ULL(route->source_stream))) {
+ streams0 |= BIT_ULL(route->source_stream);
+ streams1 |= BIT_ULL(route->sink_stream);
+ }
+ }
+
+ *streams = streams0;
+ return streams1;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
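+
/*
 * Usage sketch (illustrative, hypothetical driver): in a streams-aware
 * .enable_streams() implementation, v4l2_subdev_state_xlate_streams()
 * translates the requested source-pad streams into the sink-pad streams
 * they are routed from. The pad layout below is an assumption of this
 * sketch.
 */
enum { FOO_PAD_SINK, FOO_PAD_SOURCE };	/* hypothetical pad indices */

static int foo_enable_streams(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state, u32 pad,
			      u64 streams_mask)
{
	u64 source_streams = streams_mask;
	u64 sink_streams;

	/* Map the source-pad streams to the corresponding sink-pad streams. */
	sink_streams = v4l2_subdev_state_xlate_streams(state, FOO_PAD_SOURCE,
						       FOO_PAD_SINK,
						       &source_streams);

	/*
	 * ... program the hardware for 'source_streams' and enable
	 * 'sink_streams' on the upstream subdev ...
	 */

	return 0;
}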
+
+int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
+ const struct v4l2_subdev_krouting *routing,
+ enum v4l2_subdev_routing_restriction disallow)
+{
+ u32 *remote_pads = NULL;
+ unsigned int i, j;
+ int ret = -EINVAL;
+
+ if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
+ V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
+ remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
+ GFP_KERNEL);
+ if (!remote_pads)
+ return -ENOMEM;
+
+ for (i = 0; i < sd->entity.num_pads; ++i)
+ remote_pads[i] = U32_MAX;
+ }
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routing->routes[i];
+
+ /* Validate the sink and source pad numbers. */
+ if (route->sink_pad >= sd->entity.num_pads ||
+ !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
+ dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
+ i, route->sink_pad);
+ goto out;
+ }
+
+ if (route->source_pad >= sd->entity.num_pads ||
+ !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
+ dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
+ i, route->source_pad);
+ goto out;
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
+ * sink pad must be routed to a single source pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
+ if (remote_pads[route->sink_pad] != U32_MAX &&
+ remote_pads[route->sink_pad] != route->source_pad) {
+ dev_dbg(sd->dev,
+ "route %u attempts to mix %s streams\n",
+ i, "sink");
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
+ * source pad must originate from a single sink pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
+ if (remote_pads[route->source_pad] != U32_MAX &&
+ remote_pads[route->source_pad] != route->sink_pad) {
+ dev_dbg(sd->dev,
+ "route %u attempts to mix %s streams\n",
+ i, "source");
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
+ * side can not do stream multiplexing, i.e. there can be only
+ * a single stream in a sink pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
+ if (remote_pads[route->sink_pad] != U32_MAX) {
+ dev_dbg(sd->dev,
+ "route %u attempts to multiplex on %s pad %u\n",
+ i, "sink", route->sink_pad);
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
+ * source side can not do stream multiplexing, i.e. there can
+ * be only a single stream in a source pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
+ if (remote_pads[route->source_pad] != U32_MAX) {
+ dev_dbg(sd->dev,
+ "route %u attempts to multiplex on %s pad %u\n",
+ i, "source", route->source_pad);
+ goto out;
+ }
+ }
+
+ if (remote_pads) {
+ remote_pads[route->sink_pad] = route->source_pad;
+ remote_pads[route->source_pad] = route->sink_pad;
+ }
+
+ for (j = i + 1; j < routing->num_routes; ++j) {
+ const struct v4l2_subdev_route *r = &routing->routes[j];
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
+ * originate from the same (sink) stream.
+ */
+ if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
+ route->sink_pad == r->sink_pad &&
+ route->sink_stream == r->sink_stream) {
+ dev_dbg(sd->dev,
+ "routes %u and %u originate from same sink (%u/%u)\n",
+ i, j, route->sink_pad,
+ route->sink_stream);
+ goto out;
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
+ * at the same (source) stream.
+ */
+ if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
+ route->source_pad == r->source_pad &&
+ route->source_stream == r->source_stream) {
+ dev_dbg(sd->dev,
+ "routes %u and %u end at same source (%u/%u)\n",
+ i, j, route->source_pad,
+ route->source_stream);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ kfree(remote_pads);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
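+
/*
 * Usage sketch (illustrative, hypothetical driver): a typical .set_routing()
 * handler validates the proposed routing against the restrictions the
 * hardware can support and then stores it, resetting every stream format to
 * a default. The default format values below are assumptions of this sketch.
 */
static int foo_set_routing(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *state,
			   enum v4l2_subdev_format_whence which,
			   struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt def_fmt = {
		.width = 1920,
		.height = 1080,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
	};
	int ret;

	/* Reject anything other than 1:1 sink-to-source routes. */
	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
	if (ret)
		return ret;

	return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &def_fmt);
}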
+
+static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask,
+ u64 *found_streams,
+ u64 *enabled_streams)
+{
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
+ *found_streams = BIT_ULL(0);
+ *enabled_streams =
+ (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0;
+ dev_dbg(sd->dev,
+ "collect_streams: sub-device \"%s\" does not support streams\n",
+ sd->entity.name);
+ return;
+ }
+
+ *found_streams = 0;
+ *enabled_streams = 0;
- rval = v4l2_subdev_link_validate_get_format(
- link->source, &source_fmt);
- if (rval < 0)
+ for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
+ const struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
+ continue;
+
+ *found_streams |= BIT_ULL(cfg->stream);
+ if (cfg->enabled)
+ *enabled_streams |= BIT_ULL(cfg->stream);
+ }
+
+ dev_dbg(sd->dev,
+ "collect_streams: \"%s\":%u: found %#llx enabled %#llx\n",
+ sd->entity.name, pad, *found_streams, *enabled_streams);
+}
+
+static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask,
+ bool enabled)
+{
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
+ if (enabled)
+ sd->enabled_pads |= BIT_ULL(pad);
+ else
+ sd->enabled_pads &= ~BIT_ULL(pad);
+ return;
+ }
+
+ for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
+ cfg->enabled = enabled;
+ }
+}
+
+int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ struct v4l2_subdev_state *state;
+ bool already_streaming;
+ u64 enabled_streams;
+ u64 found_streams;
+ bool use_s_stream;
+ int ret;
+
+ dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
+ streams_mask);
+
+ /* A few basic sanity checks first. */
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ return -EOPNOTSUPP;
+
+ /*
+ * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
+ * with 64 pads or less can be supported.
+ */
+ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
+ return -EOPNOTSUPP;
+
+ if (!streams_mask)
return 0;
- rval = v4l2_subdev_link_validate_get_format(
- link->sink, &sink_fmt);
- if (rval < 0)
+ /* Fallback on .s_stream() if .enable_streams() isn't available. */
+ use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams);
+
+ if (!use_s_stream)
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ else
+ state = NULL;
+
+ /*
+ * Verify that the requested streams exist and that they are not
+ * already enabled.
+ */
+
+ v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
+ &found_streams, &enabled_streams);
+
+ if (found_streams != streams_mask) {
+ dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
+ streams_mask & ~found_streams, sd->entity.name, pad);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (enabled_streams) {
+ dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n",
+ enabled_streams, sd->entity.name, pad);
+ ret = -EALREADY;
+ goto done;
+ }
+
+ already_streaming = v4l2_subdev_is_streaming(sd);
+
+ if (!use_s_stream) {
+ /* Call the .enable_streams() operation. */
+ ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
+ streams_mask);
+ } else {
+ /* Start streaming when the first pad is enabled. */
+ if (!already_streaming)
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ else
+ ret = 0;
+ }
+
+ if (ret) {
+ dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
+ goto done;
+ }
+
+ /* Mark the streams as enabled. */
+ v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true);
+
+ /*
+ * TODO: When all the drivers have been changed to use
+ * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(),
+ * instead of calling .s_stream() operation directly, we can remove
+ * the privacy LED handling from call_s_stream() and do it here
+ * for all cases.
+ */
+ if (!use_s_stream && !already_streaming)
+ v4l2_subdev_enable_privacy_led(sd);
+
+done:
+ if (!use_s_stream)
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
+
+int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ struct v4l2_subdev_state *state;
+ u64 enabled_streams;
+ u64 found_streams;
+ bool use_s_stream;
+ int ret;
+
+ dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
+ streams_mask);
+
+ /* A few basic sanity checks first. */
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ return -EOPNOTSUPP;
+
+ /*
+ * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
+ * with 64 pads or less can be supported.
+ */
+ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
+ return -EOPNOTSUPP;
+
+ if (!streams_mask)
return 0;
- sink = media_entity_to_v4l2_subdev(link->sink->entity);
+ /* Fallback on .s_stream() if .disable_streams() isn't available. */
+ use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams);
- rval = v4l2_subdev_call(sink, pad, link_validate, link,
- &source_fmt, &sink_fmt);
- if (rval != -ENOIOCTLCMD)
- return rval;
+ if (!use_s_stream)
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ else
+ state = NULL;
- return v4l2_subdev_link_validate_default(
- sink, link, &source_fmt, &sink_fmt);
+ /*
+ * Verify that the requested streams exist and that they are not
+ * already disabled.
+ */
+
+ v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
+ &found_streams, &enabled_streams);
+
+ if (found_streams != streams_mask) {
+ dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
+ streams_mask & ~found_streams, sd->entity.name, pad);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (enabled_streams != streams_mask) {
+ dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n",
+ streams_mask & ~enabled_streams, sd->entity.name, pad);
+ ret = -EALREADY;
+ goto done;
+ }
+
+ if (!use_s_stream) {
+ /* Call the .disable_streams() operation. */
+ ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
+ streams_mask);
+ } else {
+ /* Stop streaming when the last streams are disabled. */
+
+ if (!(sd->enabled_pads & ~BIT_ULL(pad)))
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ else
+ ret = 0;
+ }
+
+ if (ret) {
+ dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
+ goto done;
+ }
+
+ v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false);
+
+done:
+ if (!use_s_stream) {
+ if (!v4l2_subdev_is_streaming(sd))
+ v4l2_subdev_disable_privacy_led(sd);
+
+ v4l2_subdev_unlock_state(state);
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
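+
/*
 * Usage sketch (illustrative, hypothetical driver): a bridge or video node
 * driver starts and stops the streams it consumes on the remote subdev's
 * source pad with the two helpers above instead of calling .s_stream()
 * directly. struct foo_video and the single-stream mask are assumptions of
 * this sketch.
 */
static int foo_start_streaming(struct foo_video *vid)
{
	struct media_pad *remote = media_pad_remote_pad_first(&vid->pad);
	struct v4l2_subdev *src_sd;
	int ret;

	if (!remote)
		return -EPIPE;

	src_sd = media_entity_to_v4l2_subdev(remote->entity);

	ret = v4l2_subdev_enable_streams(src_sd, remote->index, BIT_ULL(0));
	if (ret)
		return ret;

	vid->src_sd = src_sd;
	vid->src_pad = remote->index;

	return 0;
}

static void foo_stop_streaming(struct foo_video *vid)
{
	v4l2_subdev_disable_streams(vid->src_sd, vid->src_pad, BIT_ULL(0));
}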
+
+int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev_route *route;
+ struct media_pad *pad;
+ u64 source_mask = 0;
+ int pad_index = -1;
+
+ /*
+ * Find the source pad. This helper is meant for subdevs that have a
+ * single source pad, so failures shouldn't happen, but catch them
+ * loudly nonetheless as they indicate a driver bug.
+ */
+ media_entity_for_each_pad(&sd->entity, pad) {
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ pad_index = pad->index;
+ break;
+ }
+ }
+
+ if (WARN_ON(pad_index == -1))
+ return -EINVAL;
+
+ if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
+ /*
+ * As there's a single source pad, just collect all the source
+ * streams.
+ */
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ for_each_active_route(&state->routing, route)
+ source_mask |= BIT_ULL(route->source_stream);
+
+ v4l2_subdev_unlock_state(state);
+ } else {
+ /*
+ * For non-streams subdevices, there's a single implicit stream
+ * per pad.
+ */
+ source_mask = BIT_ULL(0);
+ }
+
+ if (enable)
+ return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
+ else
+ return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
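+
/*
 * Usage sketch (illustrative, hypothetical driver): a subdev that already
 * implements .enable_streams()/.disable_streams() but is still started by
 * callers through the legacy .s_stream() operation can plug in the helper
 * above directly.
 */
static const struct v4l2_subdev_video_ops foo_video_ops = {
	.s_stream	= v4l2_subdev_s_stream_helper,
};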
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
@@ -468,9 +2562,78 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
sd->grp_id = 0;
sd->dev_priv = NULL;
sd->host_priv = NULL;
+ sd->privacy_led = NULL;
+ INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.name = sd->name;
- sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
+ sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
+
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+ const struct v4l2_event *ev)
+{
+ v4l2_event_queue(sd->devnode, ev);
+ v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
+
+bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
+{
+ struct v4l2_subdev_state *state;
+
+ if (!v4l2_subdev_has_op(sd, pad, enable_streams))
+ return sd->s_stream_enabled;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
+ return !!sd->enabled_pads;
+
+ state = v4l2_subdev_get_locked_active_state(sd);
+
+ for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
+ const struct v4l2_subdev_stream_config *cfg;
+
+ cfg = &state->stream_configs.configs[i];
+
+ if (cfg->enabled)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ sd->privacy_led = led_get(sd->dev, "privacy");
+ if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
+ return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
+ "getting privacy LED\n");
+
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_disable(sd->privacy_led);
+ led_trigger_remove(sd->privacy_led);
+ led_set_brightness(sd->privacy_led, 0);
+ mutex_unlock(&sd->privacy_led->led_access);
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
+
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_enable(sd->privacy_led);
+ mutex_unlock(&sd->privacy_led->led_access);
+ led_put(sd->privacy_led);
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
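+
/*
 * Usage sketch (illustrative, hypothetical sensor driver): the privacy LED
 * helpers above are called at probe()/remove() time; the core then turns
 * the LED on when streaming starts and off when it stops. struct foo_sensor
 * is a hypothetical driver structure embedding the subdev.
 */
static int foo_sensor_probe(struct foo_sensor *sensor)
{
	int ret;

	/* ... subdev, pad and control initialisation ... */

	ret = v4l2_subdev_get_privacy_led(&sensor->sd);
	if (ret)
		return ret;

	return 0;
}

static void foo_sensor_remove(struct foo_sensor *sensor)
{
	v4l2_subdev_put_privacy_led(&sensor->sd);
}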
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
new file mode 100644
index 000000000000..95f3b02e1f84
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf);
diff --git a/drivers/media/v4l2-core/v4l2-vp9.c b/drivers/media/v4l2-core/v4l2-vp9.c
new file mode 100644
index 000000000000..859589f1fd35
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-vp9.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 VP9 helpers.
+ *
+ * Copyright (C) 2021 Collabora, Ltd.
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include <linux/module.h>
+
+#include <media/v4l2-vp9.h>
+
+const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = {
+ {
+ /* above = dc */
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */
+ }, { /* above = v */
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */
+ }, { /* above = h */
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 }, /*left = tm */
+ }, { /* above = d45 */
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */
+ }, { /* above = d135 */
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */
+ }, { /* above = d117 */
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */
+ }, { /* above = d153 */
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc */
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */
+ }, { /* above = d207 */
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */
+ }, { /* above = d63 */
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */
+ }, { /* above = tm */
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, /*left = h */
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */
+ }
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob);
+
+const u8 v4l2_vp9_kf_partition_probs[16][3] = {
+ /* 8x8 -> 4x4 */
+ { 158, 97, 94 }, /* a/l both not split */
+ { 93, 24, 99 }, /* a split, l not split */
+ { 85, 119, 44 }, /* l split, a not split */
+ { 62, 59, 67 }, /* a/l both split */
+ /* 16x16 -> 8x8 */
+ { 149, 53, 53 }, /* a/l both not split */
+ { 94, 20, 48 }, /* a split, l not split */
+ { 83, 53, 24 }, /* l split, a not split */
+ { 52, 18, 18 }, /* a/l both split */
+ /* 32x32 -> 16x16 */
+ { 150, 40, 39 }, /* a/l both not split */
+ { 78, 12, 26 }, /* a split, l not split */
+ { 67, 33, 11 }, /* l split, a not split */
+ { 24, 7, 5 }, /* a/l both split */
+ /* 64x64 -> 32x32 */
+ { 174, 35, 49 }, /* a/l both not split */
+ { 68, 11, 27 }, /* a split, l not split */
+ { 57, 15, 9 }, /* l split, a not split */
+ { 12, 3, 3 }, /* a/l both split */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs);
+
+const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = {
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob);
+
+const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = {
+ .tx8 = {
+ { 100 },
+ { 66 },
+ },
+ .tx16 = {
+ { 20, 152 },
+ { 15, 101 },
+ },
+ .tx32 = {
+ { 3, 136, 37 },
+ { 5, 52, 13 },
+ },
+ .coef = {
+ { /* tx = 4x4 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 },
+ },
+ { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 },
+ },
+ { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 },
+ },
+ { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 },
+ },
+ { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 },
+ },
+ { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 },
+ },
+ { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 },
+ },
+ { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 },
+ },
+ { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 },
+ },
+ { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 },
+ },
+ { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 },
+ },
+ { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 },
+ },
+ { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 },
+ },
+ },
+ },
+ },
+ { /* tx = 8x8 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 },
+ },
+ { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 },
+ },
+ { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 },
+ },
+ { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 },
+ },
+ { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 },
+ },
+ { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 },
+ },
+ { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 },
+ },
+ { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 },
+ },
+ },
+ },
+ },
+ { /* tx = 16x16 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 },
+ },
+ { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 },
+ },
+ { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 },
+ },
+ { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 },
+ },
+ },
+ },
+ },
+ { /* tx = 32x32 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 },
+ },
+ { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 },
+ },
+ { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 },
+ },
+ { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 },
+ },
+ { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 },
+ },
+ { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 },
+ },
+ { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 },
+ },
+ { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 },
+ },
+ { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 },
+ },
+ },
+ },
+ },
+ },
+
+ .skip = { 192, 128, 64 },
+ .inter_mode = {
+ { 2, 173, 34 },
+ { 7, 145, 85 },
+ { 7, 166, 63 },
+ { 7, 94, 66 },
+ { 8, 64, 46 },
+ { 17, 81, 31 },
+ { 25, 29, 30 },
+ },
+ .interp_filter = {
+ { 235, 162 },
+ { 36, 255 },
+ { 34, 3 },
+ { 149, 144 },
+ },
+ .is_inter = { 9, 102, 187, 225 },
+ .comp_mode = { 239, 183, 119, 96, 41 },
+ .single_ref = {
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 },
+ },
+ .comp_ref = { 50, 126, 123, 221, 226 },
+ .y_mode = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 },
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 },
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 },
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 },
+ },
+ .uv_mode = {
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+ },
+ .partition = {
+ /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */,
+ },
+
+ .mv = {
+ .joint = { 32, 64, 96 },
+ .sign = { 128, 128 },
+ .classes = {
+ { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 },
+ { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 },
+ },
+ .class0_bit = { 216, 208 },
+ .bits = {
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
+ },
+ .class0_fr = {
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ },
+ .fr = {
+ { 64, 96, 64 },
+ { 64, 96, 64 },
+ },
+ .class0_hp = { 160, 160 },
+ .hp = { 128, 128 },
+ },
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs);
+
+static u32 fastdiv(u32 dividend, u16 divisor)
+{
+#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d)))
+#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \
+ DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \
+ DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \
+ DIV_INV(d8), DIV_INV(d9)
+
+ static const u32 inv[] = {
+ DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5),
+ DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9),
+ DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
+ DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29),
+ DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39),
+ DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49),
+ DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59),
+ DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69),
+ DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79),
+ DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89),
+ DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99),
+ DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109),
+ DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119),
+ DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 129),
+ DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139),
+ DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149),
+ DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159),
+ DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169),
+ DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179),
+ DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189),
+ DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199),
+ DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209),
+ DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219),
+ DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229),
+ DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239),
+ DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249),
+ DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253),
+ DIV_INV(254), DIV_INV(255), DIV_INV(256),
+ };
+
+ if (divisor == 0)
+ return 0;
+ else if (divisor == 1)
+ return dividend;
+
+ if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv)))
+ return dividend;
+
+ return ((u64)dividend * inv[divisor - 2]) >> 32;
+}
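
A quick editorial self-check of the reciprocal trick used by fastdiv() above (not part of the patch): each table entry is ceil(2^32 / d), so (n * inv[d - 2]) >> 32 equals n / d exactly for the small dividends this file ever produces (at most max_update_factor * count_sat == 128 * 24 in merge_prob() further down). A minimal userspace sketch under that assumption:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint32_t d = 2; d <= 256; d++) {
			/* same constant as DIV_INV(d): ceil(2^32 / d) */
			uint32_t inv = (uint32_t)(((1ULL << 32) + d - 1) / d);

			/* 128 * 24 == 3072 is the largest dividend merge_prob() feeds in */
			for (uint32_t n = 0; n <= 3072; n++)
				assert((uint32_t)(((uint64_t)n * inv) >> 32) == n / d);
		}
		return 0;
	}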
+
+/* 6.3.6 inv_recenter_nonneg(v, m) */
+static int inv_recenter_nonneg(int v, int m)
+{
+ if (v > 2 * m)
+ return v;
+
+ if (v & 1)
+ return m - ((v + 1) >> 1);
+
+ return m + (v >> 1);
+}
+
+/*
+ * part of 6.3.5 inv_remap_prob(deltaProb, prob)
+ * delta = inv_map_table[deltaProb] done by userspace
+ */
+static int update_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return prob <= 128 ?
+ 1 + inv_recenter_nonneg(delta, prob - 1) :
+ 255 - inv_recenter_nonneg(delta, 255 - prob);
+}
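
A worked example (editorial, not from the patch) of how the inverse-remapped delta re-centers around the current probability:

	/*
	 * update_prob(4, 100): prob <= 128, so the result is
	 *   1 + inv_recenter_nonneg(4, 99) = 1 + (99 + 4/2) = 102
	 * update_prob(3, 100):
	 *   1 + inv_recenter_nonneg(3, 99) = 1 + (99 - (3 + 1)/2) = 98
	 * Even deltas step the probability up and odd deltas step it down,
	 * spiralling outwards from the current value; delta == 0 leaves it
	 * untouched.
	 */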
+
+/* Counterpart to 6.3.2 tx_mode_probs() */
+static void update_tx_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ u8 *p8x8 = probs->tx8[i];
+ u8 *p16x16 = probs->tx16[i];
+ u8 *p32x32 = probs->tx32[i];
+ const u8 *d8x8 = deltas->tx8[i];
+ const u8 *d16x16 = deltas->tx16[i];
+ const u8 *d32x32 = deltas->tx32[i];
+
+ p8x8[0] = update_prob(d8x8[0], p8x8[0]);
+ p16x16[0] = update_prob(d16x16[0], p16x16[0]);
+ p16x16[1] = update_prob(d16x16[1], p16x16[1]);
+ p32x32[0] = update_prob(d32x32[0], p32x32[0]);
+ p32x32[1] = update_prob(d32x32[1], p32x32[1]);
+ p32x32[2] = update_prob(d32x32[2], p32x32[2]);
+ }
+}
+
+#define BAND_6(band) ((band) == 0 ? 3 : 6)
+
+static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3])
+{
+ int l, m, n;
+
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs[l][m];
+ const u8 *d = deltas[l][m];
+
+ for (n = 0; n < 3; n++)
+ p[n] = update_prob(d[n], p[n]);
+ }
+}
+
+/* Counterpart to 6.3.7 read_coef_probs() */
+static void update_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]);
+
+ if (deltas->tx_mode == i)
+ break;
+ }
+}
+
+/* Counterpart to 6.3.8 read_skip_prob() */
+static void update_skip_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]);
+}
+
+/* Counterpart to 6.3.9 read_inter_mode_probs() */
+static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) {
+ u8 *p = probs->inter_mode[i];
+ const u8 *d = deltas->inter_mode[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+/* Counterpart to 6.3.10 read_interp_filter_probs() */
+static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) {
+ u8 *p = probs->interp_filter[i];
+ const u8 *d = deltas->interp_filter[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+}
+
+/* Counterpart to 6.3.11 read_is_inter_probs() */
+static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]);
+}
+
+/* 6.3.12 frame_reference_mode() done entirely in userspace */
+
+/* Counterpart to 6.3.13 frame_reference_mode_probs() */
+static void
+update_frame_reference_mode_probs(unsigned int reference_mode,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = update_prob(deltas->comp_mode[i],
+ probs->comp_mode[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) {
+ u8 *p = probs->single_ref[i];
+ const u8 *d = deltas->single_ref[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]);
+}
+
+/* Counterpart to 6.3.14 read_y_mode_probs() */
+static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j)
+ probs->y_mode[i][j] =
+ update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]);
+}
+
+/* Counterpart to 6.3.15 read_partition_probs() */
+static void update_partition_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++) {
+ u8 *p = probs->partition[i * 4 + j];
+ const u8 *d = deltas->partition[i * 4 + j];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+static inline int update_mv_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return delta;
+}
+
+/* Counterpart to 6.3.16 mv_probs() */
+static void update_mv_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u8 *p = probs->mv.joint;
+ const u8 *d = deltas->mv.joint;
+ unsigned int i, j;
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ p = probs->mv.sign;
+ d = deltas->mv.sign;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.classes[i];
+ d = deltas->mv.classes[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ p = probs->mv.class0_bit;
+ d = deltas->mv.class0_bit;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.bits[i];
+ d = deltas->mv.bits[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) {
+ p = probs->mv.class0_fr[i][j];
+ d = deltas->mv.class0_fr[i][j];
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+ }
+
+ p = probs->mv.fr[i];
+ d = deltas->mv.fr[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) {
+ p = probs->mv.class0_hp;
+ d = deltas->mv.class0_hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.hp;
+ d = deltas->mv.hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+ }
+ }
+}
+
+/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */
+void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ update_tx_probs(probs, deltas);
+
+ update_coef_probs(probs, deltas, dec_params);
+
+ update_skip_probs(probs, deltas);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY)
+ return;
+
+ update_inter_mode_probs(probs, deltas);
+
+ if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ update_interp_filter_probs(probs, deltas);
+
+ update_is_inter_probs(probs, deltas);
+
+ update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas);
+
+ update_y_mode_probs(probs, deltas);
+
+ update_partition_probs(probs, deltas);
+
+ update_mv_probs(probs, deltas, dec_params);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs);
+
+u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct v4l2_vp9_frame_context *frame_context)
+{
+ int i;
+
+ u8 fctx_idx = dec_params->frame_context_idx;
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) {
+ /*
+ * setup_past_independence()
+ * We do nothing here. Instead of storing default probs in some intermediate
+ * location and then copying from that location to appropriate contexts
+ * in save_probs() below, we skip that step and save default probs directly
+ * to appropriate contexts.
+ */
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT ||
+ dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
+ for (i = 0; i < 4; ++i)
+ /* save_probs(i) */
+ memcpy(&frame_context[i], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
+ /* save_probs(fctx_idx) */
+ memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ fctx_idx = 0;
+ }
+
+ return fctx_idx;
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx);
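
A hedged sketch (editorial, not part of this patch) of how a stateless decoder driver might combine the two helpers above when it starts decoding a frame. "mydrv_ctx", its members and the overall flow are hypothetical driver state; only the v4l2_vp9_* types and helpers come from this file.

	static void mydrv_setup_frame_probs(struct mydrv_ctx *ctx,
					    const struct v4l2_ctrl_vp9_frame *dec_params,
					    const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
	{
		u8 fctx_idx;

		/* Reset contexts to the defaults whenever the spec requires it. */
		fctx_idx = v4l2_vp9_reset_frame_ctx(dec_params, ctx->frame_context);
		ctx->fctx_idx = fctx_idx;

		/*
		 * Work on a scratch copy; the stored context is only refreshed
		 * after decoding, and only if the frame asks for it.
		 */
		ctx->probs = ctx->frame_context[fctx_idx];

		/* Apply the forward updates parsed from the compressed header. */
		v4l2_vp9_fw_update_probs(&ctx->probs, deltas, dec_params);
	}

Here ctx->frame_context would be a driver-owned struct v4l2_vp9_frame_context frame_context[4] array and ctx->probs a scratch struct v4l2_vp9_frame_context.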
+
+/* 8.4.1 Merge prob process */
+static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor)
+{
+ u32 den, prob, count, factor;
+
+ den = ct0 + ct1;
+ if (!den) {
+ /*
+ * prob = 128, count = 0, update_factor = 0
+ * Round2's argument: pre_prob * 256
+ * (pre_prob * 256 + 128) >> 8 == pre_prob
+ */
+ return pre_prob;
+ }
+
+ prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255);
+ count = min_t(u32, den, count_sat);
+ factor = fastdiv(max_update_factor * count, count_sat);
+
+ /*
+ * Round2(pre_prob * (256 - factor) + prob * factor, 8)
+ * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8)
+ * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8)
+ */
+ return pre_prob + (((prob - pre_prob) * factor + 128) >> 8);
+}
+
+static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1)
+{
+ return merge_prob(pre_prob, ct0, ct1, 20, 128);
+}
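
A worked instance (editorial, not from the patch) showing how backward adaptation pulls the old probability towards the observed rate:

	/*
	 * noncoef_merge_prob(128, 10, 30):
	 *   den = 40, prob = clamp((10 * 256 + 20) / 40, 1, 255) = 64,
	 *   count = min(40, 20) = 20, factor = 128 * 20 / 20 = 128.
	 * Using the Round2() identity in merge_prob() above:
	 *   Round2(128 * 256 + (64 - 128) * 128, 8) = Round2(24576, 8) = 96,
	 * i.e. the counts saturate, the update weight is 128/256 = 1/2 and the
	 * old probability (128) moves half-way towards the observed rate (64).
	 */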
+
+/* 8.4.2 Merge probs process */
+/*
+ * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel.
+ * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes.
+ * It turns out that in all cases the recursive calls boil down to a short-ish series
+ * of merge_prob() invocations (note no "s").
+ *
+ * Variant A
+ * ---------
+ * merge_probs(small_token_tree, 2):
+ * merge_prob(p[1], c[0], c[1] + c[2])
+ * merge_prob(p[2], c[1], c[2])
+ *
+ * Variant B
+ * ---------
+ * merge_probs(binary_tree, 0) or
+ * merge_probs(tx_size_8_tree, 0):
+ * merge_prob(p[0], c[0], c[1])
+ *
+ * Variant C
+ * ---------
+ * merge_probs(inter_mode_tree, 0):
+ * merge_prob(p[0], c[2], c[1] + c[0] + c[3])
+ * merge_prob(p[1], c[0], c[1] + c[3])
+ * merge_prob(p[2], c[1], c[3])
+ *
+ * Variant D
+ * ---------
+ * merge_probs(intra_mode_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[9])
+ * merge_prob(p[1], c[9], c[1] + ... + c[8])
+ * merge_prob(p[2], c[1], c[2] + ... + c[8])
+ * merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7])
+ * merge_prob(p[4], c[2], c[4] + c[5])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[3], c[8] + c[6] + c[7])
+ * merge_prob(p[7], c[8], c[6] + c[7])
+ * merge_prob(p[8], c[6], c[7])
+ *
+ * Variant E
+ * ---------
+ * merge_probs(partition_tree, 0) or
+ * merge_probs(tx_size_32_tree, 0) or
+ * merge_probs(mv_joint_tree, 0) or
+ * merge_probs(mv_fr_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2] + c[3])
+ * merge_prob(p[1], c[1], c[2] + c[3])
+ * merge_prob(p[2], c[2], c[3])
+ *
+ * Variant F
+ * ---------
+ * merge_probs(interp_filter_tree, 0) or
+ * merge_probs(tx_size_16_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2])
+ * merge_prob(p[1], c[1], c[2])
+ *
+ * Variant G
+ * ---------
+ * merge_probs(mv_class_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[10])
+ * merge_prob(p[1], c[1], c[2] + ... + c[10])
+ * merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10])
+ * merge_prob(p[3], c[2], c[3])
+ * merge_prob(p[4], c[4] + c[5], c[6] + ... + c[10])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[6], c[7] + ... + c[10])
+ * merge_prob(p[7], c[7] + c[8], c[9] + c[10])
+ * merge_prob(p[8], c[7], c[8])
+ * merge_prob(p[9], c[9], c[10])
+ */
+
+static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor);
+ p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_c(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[1], c[3]);
+}
+
+static void merge_probs_variant_d(u8 *p, const u32 *c)
+{
+ u32 sum = 0, s2;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
+
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[9];
+ p[1] = noncoef_merge_prob(p[1], c[9], sum);
+ sum -= c[1];
+ p[2] = noncoef_merge_prob(p[2], c[1], sum);
+ s2 = c[2] + c[4] + c[5];
+ sum -= s2;
+ p[3] = noncoef_merge_prob(p[3], s2, sum);
+ s2 -= c[2];
+ p[4] = noncoef_merge_prob(p[4], c[2], s2);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[3];
+ p[6] = noncoef_merge_prob(p[6], c[3], sum);
+ sum -= c[8];
+ p[7] = noncoef_merge_prob(p[7], c[8], sum);
+ p[8] = noncoef_merge_prob(p[8], c[6], c[7]);
+}
+
+static inline void merge_probs_variant_e(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[2], c[3]);
+}
+
+static inline void merge_probs_variant_f(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2]);
+}
+
+static void merge_probs_variant_g(u8 *p, const u32 *c)
+{
+ u32 sum;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[1];
+ p[1] = noncoef_merge_prob(p[1], c[1], sum);
+ sum -= c[2] + c[3];
+ p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum);
+ p[3] = noncoef_merge_prob(p[3], c[2], c[3]);
+ sum -= c[4] + c[5];
+ p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[6];
+ p[6] = noncoef_merge_prob(p[6], c[6], sum);
+ p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]);
+ p[8] = noncoef_merge_prob(p[8], c[7], c[8]);
+ p[9] = noncoef_merge_prob(p[9], c[9], c[10]);
+}
+
+/* 8.4.3 Coefficient probability adaptation process */
+static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_a(p, c, 24, update_factor);
+}
+
+static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_b(p, c, 24, update_factor);
+}
+
+static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ u32 uf)
+{
+ s32 l, m;
+
+ for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) {
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs->coef[i][j][k][l][m];
+ const u32 counts_more_coefs[2] = {
+ *counts->eob[i][j][k][l][m][1],
+ *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],
+ };
+
+ adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);
+ adapt_probs_variant_b_coef(p, counts_more_coefs, uf);
+ }
+ }
+}
+
+static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ unsigned int uf)
+{
+ unsigned int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ _adapt_coeff(i, j, k, probs, counts, uf);
+}
+
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ bool use_128,
+ bool frame_is_intra)
+{
+ if (frame_is_intra) {
+ _adapt_coef_probs(probs, counts, 112);
+ } else {
+ if (use_128)
+ _adapt_coef_probs(probs, counts, 128);
+ else
+ _adapt_coef_probs(probs, counts, 112);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs);
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */
+static inline void adapt_probs_variant_b(u8 *p, const u32 *c)
+{
+ merge_probs_variant_b(p, c, 20, 128);
+}
+
+static inline void adapt_probs_variant_c(u8 *p, const u32 *c)
+{
+ merge_probs_variant_c(p, c);
+}
+
+static inline void adapt_probs_variant_d(u8 *p, const u32 *c)
+{
+ merge_probs_variant_d(p, c);
+}
+
+static inline void adapt_probs_variant_e(u8 *p, const u32 *c)
+{
+ merge_probs_variant_e(p, c);
+}
+
+static inline void adapt_probs_variant_f(u8 *p, const u32 *c)
+{
+ merge_probs_variant_f(p, c);
+}
+
+static inline void adapt_probs_variant_g(u8 *p, const u32 *c)
+{
+ merge_probs_variant_g(p, c);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */
+static inline u8 adapt_prob(u8 prob, const u32 counts[2])
+{
+ return noncoef_merge_prob(prob, counts[0], counts[1]);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process */
+void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
+ u32 flags)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++)
+ probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j],
+ (*counts->single_ref)[i][j]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++)
+ adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++)
+ adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->partition); i++)
+ adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]);
+
+ if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++)
+ adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]);
+
+ if (tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]);
+ adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]);
+ adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]);
+ }
+
+ adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]);
+
+ adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]);
+
+ probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j],
+ (*counts->bits)[i][j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++)
+ adapt_probs_variant_e(probs->mv.class0_fr[i][j],
+ (*counts->class0_fp)[i][j]);
+
+ adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]);
+
+ if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV))
+ continue;
+
+ probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i],
+ (*counts->class0_hp)[i]);
+
+ probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs);
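
A hedged sketch (editorial, not part of this patch) of the matching end-of-frame path in a driver whose hardware exposes per-frame symbol counts. The mydrv_* names and ctx members are hypothetical, the use_128 choice follows my reading of spec 8.4.3 (full 128/256 factor right after a key frame), and the sketch assumes the caller has already checked that adaptation applies (the frame is neither error resilient nor frame-parallel) and that the context should be refreshed.

	static void mydrv_adapt_probs(struct mydrv_ctx *ctx,
				      const struct v4l2_ctrl_vp9_frame *dec_params,
				      const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
	{
		bool intra = dec_params->flags & (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
						  V4L2_VP9_FRAME_FLAG_INTRA_ONLY);

		v4l2_vp9_adapt_coef_probs(&ctx->probs, &ctx->counts,
					  ctx->last_frame_was_key, intra);

		if (!intra)
			v4l2_vp9_adapt_noncoef_probs(&ctx->probs, &ctx->counts,
						     dec_params->reference_mode,
						     dec_params->interpolation_filter,
						     deltas->tx_mode, dec_params->flags);

		/* save_probs(fctx_idx): store the adapted set back into its slot */
		ctx->frame_context[ctx->fctx_idx] = ctx->probs;
	}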
+
+bool
+v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ return !!(feature_enabled[segid] & mask);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled);
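
A small usage example (editorial): checking a segmentation feature when programming per-segment hardware state. V4L2_VP9_SEG_LVL_SKIP and the seg.feature_enabled field follow the stateless VP9 uapi as I recall it, so treat the exact names as assumptions.

	static bool mydrv_segment_skips_residuals(const struct v4l2_ctrl_vp9_frame *dec_params,
						  unsigned int segid)
	{
		return v4l2_vp9_seg_feat_enabled(dec_params->seg.feature_enabled,
						 V4L2_VP9_SEG_LVL_SKIP, segid);
	}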
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 VP9 Helpers");
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@collabora.com>");
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
deleted file mode 100644
index fb5ee5dd8fe9..000000000000
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ /dev/null
@@ -1,1193 +0,0 @@
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
- * (c) 2006 Ted Walther and John Sokol
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <media/videobuf-core.h>
-
-#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should) \
- do { \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR \
- "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- } \
- } while (0)
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
- } while (0)
-
-/* --------------------------------------------------------------------- */
-
-#define CALL(q, f, arg...) \
- ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-{
- struct videobuf_buffer *vb;
-
- BUG_ON(q->msize < sizeof(*vb));
-
- if (!q->int_ops || !q->int_ops->alloc_vb) {
- printk(KERN_ERR "No specific ops defined!\n");
- BUG();
- }
-
- vb = q->int_ops->alloc_vb(q->msize);
- if (NULL != vb) {
- init_waitqueue_head(&vb->done);
- vb->magic = MAGIC_BUFFER;
- }
-
- return vb;
-}
-EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-
-static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
-{
- unsigned long flags;
- bool rc;
-
- spin_lock_irqsave(q->irqlock, flags);
- rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
- spin_unlock_irqrestore(q->irqlock, flags);
- return rc;
-};
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
- int non_blocking, int intr)
-{
- bool is_ext_locked;
- int ret = 0;
-
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
-
- if (non_blocking) {
- if (is_state_active_or_queued(q, vb))
- return 0;
- return -EAGAIN;
- }
-
- is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
-
- /* Release vdev lock to prevent this wait from blocking outside access to
- the device. */
- if (is_ext_locked)
- mutex_unlock(q->ext_lock);
- if (intr)
- ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
- else
- wait_event(vb->done, is_state_active_or_queued(q, vb));
- /* Relock */
- if (is_ext_locked)
- mutex_lock(q->ext_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- return CALL(q, iolock, q, vb, fbuf);
-}
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- if (q->int_ops->vaddr)
- return q->int_ops->vaddr(buf);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
-
-/* --------------------------------------------------------------------- */
-
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct videobuf_qtype_ops *int_ops,
- struct mutex *ext_lock)
-{
- BUG_ON(!q);
- memset(q, 0, sizeof(*q));
- q->irqlock = irqlock;
- q->ext_lock = ext_lock;
- q->dev = dev;
- q->type = type;
- q->field = field;
- q->msize = msize;
- q->ops = ops;
- q->priv_data = priv;
- q->int_ops = int_ops;
-
- /* All buffer operations are mandatory */
- BUG_ON(!q->ops->buf_setup);
- BUG_ON(!q->ops->buf_prepare);
- BUG_ON(!q->ops->buf_queue);
- BUG_ON(!q->ops->buf_release);
-
- /* Lock is mandatory for queue_cancel to work */
- BUG_ON(!irqlock);
-
- /* Having implementations for abstract methods is mandatory */
- BUG_ON(!q->int_ops);
-
- mutex_init(&q->vb_lock);
- init_waitqueue_head(&q->wait);
- INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-
- /* Locking: only used by bttv; unsafe, find a way to remove */
-int videobuf_queue_is_busy(struct videobuf_queue *q)
-{
- int i;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (q->streaming) {
- dprintk(1, "busy: streaming active\n");
- return 1;
- }
- if (q->reading) {
- dprintk(1, "busy: pending read #1\n");
- return 1;
- }
- if (q->read_buf) {
- dprintk(1, "busy: pending read #2\n");
- return 1;
- }
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- if (q->bufs[i]->map) {
- dprintk(1, "busy: buffer #%d mapped\n", i);
- return 1;
- }
- if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
- dprintk(1, "busy: buffer #%d queued\n", i);
- return 1;
- }
- if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
- dprintk(1, "busy: buffer #%d active\n", i);
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-/**
- * __videobuf_free() - free all the buffers and their control structures
- *
- * This function can only be called if streaming/reading is off, i.e. no buffers
- * are under control of the driver.
- */
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_free(struct videobuf_queue *q)
-{
- int i;
-
- dprintk(1, "%s\n", __func__);
- if (!q)
- return 0;
-
- if (q->streaming || q->reading) {
- dprintk(1, "Cannot free buffers when streaming or reading\n");
- return -EBUSY;
- }
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++)
- if (q->bufs[i] && q->bufs[i]->map) {
- dprintk(1, "Cannot free mmapped buffers\n");
- return -EBUSY;
- }
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->ops->buf_release(q, q->bufs[i]);
- kfree(q->bufs[i]);
- q->bufs[i] = NULL;
- }
-
- return 0;
-}
-
-/* Locking: Caller holds q->vb_lock */
-void videobuf_queue_cancel(struct videobuf_queue *q)
-{
- unsigned long flags = 0;
- int i;
-
- q->streaming = 0;
- q->reading = 0;
- wake_up_interruptible_sync(&q->wait);
-
- /* remove queued buffers from list */
- spin_lock_irqsave(q->irqlock, flags);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
- list_del(&q->bufs[i]->queue);
- q->bufs[i]->state = VIDEOBUF_ERROR;
- wake_up_all(&q->bufs[i]->done);
- }
- }
- spin_unlock_irqrestore(q->irqlock, flags);
-
- /* free all buffers + clear queue */
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->ops->buf_release(q, q->bufs[i]);
- }
- INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-
-/* --------------------------------------------------------------------- */
-
-/* Locking: Caller holds q->vb_lock */
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-{
- enum v4l2_field field = q->field;
-
- BUG_ON(V4L2_FIELD_ANY == field);
-
- if (V4L2_FIELD_ALTERNATE == field) {
- if (V4L2_FIELD_TOP == q->last) {
- field = V4L2_FIELD_BOTTOM;
- q->last = V4L2_FIELD_BOTTOM;
- } else {
- field = V4L2_FIELD_TOP;
- q->last = V4L2_FIELD_TOP;
- }
- }
- return field;
-}
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-
-/* Locking: Caller holds q->vb_lock */
-static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
- struct videobuf_buffer *vb, enum v4l2_buf_type type)
-{
- MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- b->index = vb->i;
- b->type = type;
-
- b->memory = vb->memory;
- switch (b->memory) {
- case V4L2_MEMORY_MMAP:
- b->m.offset = vb->boff;
- b->length = vb->bsize;
- break;
- case V4L2_MEMORY_USERPTR:
- b->m.userptr = vb->baddr;
- b->length = vb->bsize;
- break;
- case V4L2_MEMORY_OVERLAY:
- b->m.offset = vb->boff;
- break;
- case V4L2_MEMORY_DMABUF:
- /* DMABUF is not handled in videobuf framework */
- break;
- }
-
- b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb->map)
- b->flags |= V4L2_BUF_FLAG_MAPPED;
-
- switch (vb->state) {
- case VIDEOBUF_PREPARED:
- case VIDEOBUF_QUEUED:
- case VIDEOBUF_ACTIVE:
- b->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case VIDEOBUF_ERROR:
- b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
- case VIDEOBUF_DONE:
- b->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case VIDEOBUF_NEEDS_INIT:
- case VIDEOBUF_IDLE:
- /* nothing */
- break;
- }
-
- b->field = vb->field;
- b->timestamp = vb->ts;
- b->bytesused = vb->size;
- b->sequence = vb->field_count >> 1;
-}
-
-int videobuf_mmap_free(struct videobuf_queue *q)
-{
- int ret;
- videobuf_queue_lock(q);
- ret = __videobuf_free(q);
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-
-/* Locking: Caller holds q->vb_lock */
-int __videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory)
-{
- unsigned int i;
- int err;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- err = __videobuf_free(q);
- if (0 != err)
- return err;
-
- /* Allocate and initialize buffers */
- for (i = 0; i < bcount; i++) {
- q->bufs[i] = videobuf_alloc_vb(q);
-
- if (NULL == q->bufs[i])
- break;
-
- q->bufs[i]->i = i;
- q->bufs[i]->memory = memory;
- q->bufs[i]->bsize = bsize;
- switch (memory) {
- case V4L2_MEMORY_MMAP:
- q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
- break;
- case V4L2_MEMORY_USERPTR:
- case V4L2_MEMORY_OVERLAY:
- case V4L2_MEMORY_DMABUF:
- /* nothing */
- break;
- }
- }
-
- if (!i)
- return -ENOMEM;
-
- dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
-
- return i;
-}
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory)
-{
- int ret;
- videobuf_queue_lock(q);
- ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-
-int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req)
-{
- unsigned int size, count;
- int retval;
-
- if (req->count < 1) {
- dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
- return -EINVAL;
- }
-
- if (req->memory != V4L2_MEMORY_MMAP &&
- req->memory != V4L2_MEMORY_USERPTR &&
- req->memory != V4L2_MEMORY_OVERLAY) {
- dprintk(1, "reqbufs: memory type invalid\n");
- return -EINVAL;
- }
-
- videobuf_queue_lock(q);
- if (req->type != q->type) {
- dprintk(1, "reqbufs: queue type invalid\n");
- retval = -EINVAL;
- goto done;
- }
-
- if (q->streaming) {
- dprintk(1, "reqbufs: streaming already exists\n");
- retval = -EBUSY;
- goto done;
- }
- if (!list_empty(&q->stream)) {
- dprintk(1, "reqbufs: stream running\n");
- retval = -EBUSY;
- goto done;
- }
-
- count = req->count;
- if (count > VIDEO_MAX_FRAME)
- count = VIDEO_MAX_FRAME;
- size = 0;
- q->ops->buf_setup(q, &count, &size);
- dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
- count, size,
- (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
-
- retval = __videobuf_mmap_setup(q, count, size, req->memory);
- if (retval < 0) {
- dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
- goto done;
- }
-
- req->count = retval;
- retval = 0;
-
- done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
- int ret = -EINVAL;
-
- videobuf_queue_lock(q);
- if (unlikely(b->type != q->type)) {
- dprintk(1, "querybuf: Wrong type.\n");
- goto done;
- }
- if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
- dprintk(1, "querybuf: index out of range.\n");
- goto done;
- }
- if (unlikely(NULL == q->bufs[b->index])) {
- dprintk(1, "querybuf: buffer is null.\n");
- goto done;
- }
-
- videobuf_status(q, b, q->bufs[b->index], q->type);
-
- ret = 0;
-done:
- videobuf_queue_unlock(q);
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-
-int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
- struct videobuf_buffer *buf;
- enum v4l2_field field;
- unsigned long flags = 0;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (b->memory == V4L2_MEMORY_MMAP)
- down_read(&current->mm->mmap_sem);
-
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->reading) {
- dprintk(1, "qbuf: Reading running...\n");
- goto done;
- }
- retval = -EINVAL;
- if (b->type != q->type) {
- dprintk(1, "qbuf: Wrong type.\n");
- goto done;
- }
- if (b->index >= VIDEO_MAX_FRAME) {
- dprintk(1, "qbuf: index out of range.\n");
- goto done;
- }
- buf = q->bufs[b->index];
- if (NULL == buf) {
- dprintk(1, "qbuf: buffer is null.\n");
- goto done;
- }
- MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
- if (buf->memory != b->memory) {
- dprintk(1, "qbuf: memory type is wrong.\n");
- goto done;
- }
- if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
- dprintk(1, "qbuf: buffer is already queued or active.\n");
- goto done;
- }
-
- switch (b->memory) {
- case V4L2_MEMORY_MMAP:
- if (0 == buf->baddr) {
- dprintk(1, "qbuf: mmap requested "
- "but buffer addr is zero!\n");
- goto done;
- }
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
- || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
- || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
- buf->size = b->bytesused;
- buf->field = b->field;
- buf->ts = b->timestamp;
- }
- break;
- case V4L2_MEMORY_USERPTR:
- if (b->length < buf->bsize) {
- dprintk(1, "qbuf: buffer length is not enough\n");
- goto done;
- }
- if (VIDEOBUF_NEEDS_INIT != buf->state &&
- buf->baddr != b->m.userptr)
- q->ops->buf_release(q, buf);
- buf->baddr = b->m.userptr;
- break;
- case V4L2_MEMORY_OVERLAY:
- buf->boff = b->m.offset;
- break;
- default:
- dprintk(1, "qbuf: wrong memory type\n");
- goto done;
- }
-
- dprintk(1, "qbuf: requesting next field\n");
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, buf, field);
- if (0 != retval) {
- dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
- goto done;
- }
-
- list_add_tail(&buf->stream, &q->stream);
- if (q->streaming) {
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- }
- dprintk(1, "qbuf: succeeded\n");
- retval = 0;
- wake_up_interruptible_sync(&q->wait);
-
-done:
- videobuf_queue_unlock(q);
-
- if (b->memory == V4L2_MEMORY_MMAP)
- up_read(&current->mm->mmap_sem);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-{
- int retval;
-
-checks:
- if (!q->streaming) {
- dprintk(1, "next_buffer: Not streaming\n");
- retval = -EINVAL;
- goto done;
- }
-
- if (list_empty(&q->stream)) {
- if (noblock) {
- retval = -EAGAIN;
- dprintk(2, "next_buffer: no buffers to dequeue\n");
- goto done;
- } else {
- dprintk(2, "next_buffer: waiting on buffer\n");
-
- /* Drop lock to avoid deadlock with qbuf */
- videobuf_queue_unlock(q);
-
-			/* Checking list_empty and streaming without locks is
-			 * safe because we jump back to "checks" and re-validate
-			 * them while holding the lock before proceeding. */
- retval = wait_event_interruptible(q->wait,
- !list_empty(&q->stream) || !q->streaming);
- videobuf_queue_lock(q);
-
- if (retval)
- goto done;
-
- goto checks;
- }
- }
-
- retval = 0;
-
-done:
- return retval;
-}
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer(struct videobuf_queue *q,
- struct videobuf_buffer **vb, int nonblocking)
-{
- int retval;
- struct videobuf_buffer *buf = NULL;
-
- retval = stream_next_buffer_check_queue(q, nonblocking);
- if (retval)
- goto done;
-
- buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
- retval = videobuf_waiton(q, buf, nonblocking, 1);
- if (retval < 0)
- goto done;
-
- *vb = buf;
-done:
- return retval;
-}
-
-int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking)
-{
- struct videobuf_buffer *buf = NULL;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- memset(b, 0, sizeof(*b));
- videobuf_queue_lock(q);
-
- retval = stream_next_buffer(q, &buf, nonblocking);
- if (retval < 0) {
- dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
- goto done;
- }
-
- switch (buf->state) {
- case VIDEOBUF_ERROR:
- dprintk(1, "dqbuf: state is error\n");
- break;
- case VIDEOBUF_DONE:
- dprintk(1, "dqbuf: state is done\n");
- break;
- default:
- dprintk(1, "dqbuf: state invalid\n");
- retval = -EINVAL;
- goto done;
- }
- CALL(q, sync, q, buf);
- videobuf_status(q, b, buf, q->type);
- list_del(&buf->stream);
- buf->state = VIDEOBUF_IDLE;
- b->flags &= ~V4L2_BUF_FLAG_DONE;
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-
-int videobuf_streamon(struct videobuf_queue *q)
-{
- struct videobuf_buffer *buf;
- unsigned long flags = 0;
- int retval;
-
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->reading)
- goto done;
- retval = 0;
- if (q->streaming)
- goto done;
- q->streaming = 1;
- spin_lock_irqsave(q->irqlock, flags);
- list_for_each_entry(buf, &q->stream, stream)
- if (buf->state == VIDEOBUF_PREPARED)
- q->ops->buf_queue(q, buf);
- spin_unlock_irqrestore(q->irqlock, flags);
-
- wake_up_interruptible_sync(&q->wait);
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_streamoff(struct videobuf_queue *q)
-{
- if (!q->streaming)
- return -EINVAL;
-
- videobuf_queue_cancel(q);
-
- return 0;
-}
-
-int videobuf_streamoff(struct videobuf_queue *q)
-{
- int retval;
-
- videobuf_queue_lock(q);
- retval = __videobuf_streamoff(q);
- videobuf_queue_unlock(q);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-/* Locking: Caller holds q->vb_lock */
-static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
- char __user *data,
- size_t count, loff_t *ppos)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- /* setup stuff */
- q->read_buf = videobuf_alloc_vb(q);
- if (NULL == q->read_buf)
- return -ENOMEM;
-
- q->read_buf->memory = V4L2_MEMORY_USERPTR;
- q->read_buf->baddr = (unsigned long)data;
- q->read_buf->bsize = count;
-
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, q->read_buf, field);
- if (0 != retval)
- goto done;
-
- /* start capture & wait */
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- retval = videobuf_waiton(q, q->read_buf, 0, 0);
- if (0 == retval) {
- CALL(q, sync, q, q->read_buf);
- if (VIDEOBUF_ERROR == q->read_buf->state)
- retval = -EIO;
- else
- retval = q->read_buf->size;
- }
-
-done:
- /* cleanup */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- return retval;
-}
-
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- char __user *data, size_t count,
- int nonblocking)
-{
- void *vaddr = CALL(q, vaddr, buf);
-
- /* copy to userspace */
- if (count > buf->size - q->read_off)
- count = buf->size - q->read_off;
-
- if (copy_to_user(data, vaddr + q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking)
-{
- unsigned int *fc = CALL(q, vaddr, buf);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
-		 * We need it to maintain backward compatibility
-		 * with all the vbi decoding software out there ... */
- fc += (buf->size >> 2) - 1;
- *fc = buf->field_count >> 1;
- dprintk(1, "vbihack: %d\n", *fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
-
- if ((count == -EFAULT) && (pos == 0))
- return -EFAULT;
-
- return count;
-}
-
-ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int nonblocking)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- unsigned size = 0, nbufs = 1;
- int retval;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- videobuf_queue_lock(q);
-
- q->ops->buf_setup(q, &nbufs, &size);
-
- if (NULL == q->read_buf &&
- count >= size &&
- !nonblocking) {
- retval = videobuf_read_zerocopy(q, data, count, ppos);
- if (retval >= 0 || retval == -EIO)
- /* ok, all done */
- goto done;
-		/* fall back to the kernel bounce buffer on failure */
- }
-
- if (NULL == q->read_buf) {
- /* need to capture a new frame */
- retval = -ENOMEM;
- q->read_buf = videobuf_alloc_vb(q);
-
- dprintk(1, "video alloc=0x%p\n", q->read_buf);
- if (NULL == q->read_buf)
- goto done;
- q->read_buf->memory = V4L2_MEMORY_USERPTR;
- q->read_buf->bsize = count; /* preferred size */
- field = videobuf_next_field(q);
- retval = q->ops->buf_prepare(q, q->read_buf, field);
-
- if (0 != retval) {
- kfree(q->read_buf);
- q->read_buf = NULL;
- goto done;
- }
-
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
-
- q->read_off = 0;
- }
-
- /* wait until capture is done */
- retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
- if (0 != retval)
- goto done;
-
- CALL(q, sync, q, q->read_buf);
-
- if (VIDEOBUF_ERROR == q->read_buf->state) {
- /* catch I/O errors */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- retval = -EIO;
- goto done;
- }
-
- /* Copy to userspace */
- retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
- if (retval < 0)
- goto done;
-
- q->read_off += retval;
- if (q->read_off == q->read_buf->size) {
- /* all data copied, cleanup */
- q->ops->buf_release(q, q->read_buf);
- kfree(q->read_buf);
- q->read_buf = NULL;
- }
-
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_read_start(struct videobuf_queue *q)
-{
- enum v4l2_field field;
- unsigned long flags = 0;
- unsigned int count = 0, size = 0;
- int err, i;
-
- q->ops->buf_setup(q, &count, &size);
- if (count < 2)
- count = 2;
- if (count > VIDEO_MAX_FRAME)
- count = VIDEO_MAX_FRAME;
- size = PAGE_ALIGN(size);
-
- err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
- if (err < 0)
- return err;
-
- count = err;
-
- for (i = 0; i < count; i++) {
- field = videobuf_next_field(q);
- err = q->ops->buf_prepare(q, q->bufs[i], field);
- if (err)
- return err;
- list_add_tail(&q->bufs[i]->stream, &q->stream);
- }
- spin_lock_irqsave(q->irqlock, flags);
- for (i = 0; i < count; i++)
- q->ops->buf_queue(q, q->bufs[i]);
- spin_unlock_irqrestore(q->irqlock, flags);
- q->reading = 1;
- return 0;
-}
-
-static void __videobuf_read_stop(struct videobuf_queue *q)
-{
- int i;
-
- videobuf_queue_cancel(q);
- __videobuf_free(q);
- INIT_LIST_HEAD(&q->stream);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- kfree(q->bufs[i]);
- q->bufs[i] = NULL;
- }
- q->read_buf = NULL;
-}
-
-int videobuf_read_start(struct videobuf_queue *q)
-{
- int rc;
-
- videobuf_queue_lock(q);
- rc = __videobuf_read_start(q);
- videobuf_queue_unlock(q);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-
-void videobuf_read_stop(struct videobuf_queue *q)
-{
- videobuf_queue_lock(q);
- __videobuf_read_stop(q);
- videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-
-void videobuf_stop(struct videobuf_queue *q)
-{
- videobuf_queue_lock(q);
-
- if (q->streaming)
- __videobuf_streamoff(q);
-
- if (q->reading)
- __videobuf_read_stop(q);
-
- videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_stop);
-
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int vbihack, int nonblocking)
-{
- int rc, retval;
- unsigned long flags = 0;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- dprintk(2, "%s\n", __func__);
- videobuf_queue_lock(q);
- retval = -EBUSY;
- if (q->streaming)
- goto done;
- if (!q->reading) {
- retval = __videobuf_read_start(q);
- if (retval < 0)
- goto done;
- }
-
- retval = 0;
- while (count > 0) {
- /* get / wait for data */
- if (NULL == q->read_buf) {
- q->read_buf = list_entry(q->stream.next,
- struct videobuf_buffer,
- stream);
- list_del(&q->read_buf->stream);
- q->read_off = 0;
- }
- rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
- if (rc < 0) {
- if (0 == retval)
- retval = rc;
- break;
- }
-
- if (q->read_buf->state == VIDEOBUF_DONE) {
- rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
- retval, vbihack, nonblocking);
- if (rc < 0) {
- retval = rc;
- break;
- }
- retval += rc;
- count -= rc;
- q->read_off += rc;
- } else {
- /* some error */
- q->read_off = q->read_buf->size;
- if (0 == retval)
- retval = -EIO;
- }
-
- /* requeue buffer when done with copying */
- if (q->read_off == q->read_buf->size) {
- list_add_tail(&q->read_buf->stream,
- &q->stream);
- spin_lock_irqsave(q->irqlock, flags);
- q->ops->buf_queue(q, q->read_buf);
- spin_unlock_irqrestore(q->irqlock, flags);
- q->read_buf = NULL;
- }
- if (retval < 0)
- break;
- }
-
-done:
- videobuf_queue_unlock(q);
- return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-
-unsigned int videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait)
-{
- unsigned long req_events = poll_requested_events(wait);
- struct videobuf_buffer *buf = NULL;
- unsigned int rc = 0;
-
- videobuf_queue_lock(q);
- if (q->streaming) {
- if (!list_empty(&q->stream))
- buf = list_entry(q->stream.next,
- struct videobuf_buffer, stream);
- } else if (req_events & (POLLIN | POLLRDNORM)) {
- if (!q->reading)
- __videobuf_read_start(q);
- if (!q->reading) {
- rc = POLLERR;
- } else if (NULL == q->read_buf) {
- q->read_buf = list_entry(q->stream.next,
- struct videobuf_buffer,
- stream);
- list_del(&q->read_buf->stream);
- q->read_off = 0;
- }
- buf = q->read_buf;
- }
- if (!buf)
- rc = POLLERR;
-
- if (0 == rc) {
- poll_wait(file, &buf->done, wait);
- if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR) {
- switch (q->type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- case V4L2_BUF_TYPE_VBI_OUTPUT:
- case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- rc = POLLOUT | POLLWRNORM;
- break;
- default:
- rc = POLLIN | POLLRDNORM;
- break;
- }
- }
- }
- videobuf_queue_unlock(q);
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-{
- int rc = -EINVAL;
- int i;
-
- MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
- dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
- return -EINVAL;
- }
-
- videobuf_queue_lock(q);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- struct videobuf_buffer *buf = q->bufs[i];
-
- if (buf && buf->memory == V4L2_MEMORY_MMAP &&
- buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
- rc = CALL(q, mmap_mapper, q, buf, vma);
- break;
- }
- }
- videobuf_queue_unlock(q);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
deleted file mode 100644
index 65411adcd0ea..000000000000
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- *
- * Based on videobuf-vmalloc.c,
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <media/videobuf-dma-contig.h>
-
-struct videobuf_dma_contig_memory {
- u32 magic;
- void *vaddr;
- dma_addr_t dma_handle;
- unsigned long size;
-};
-
-#define MAGIC_DC_MEM 0x0733ac61
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
- BUG(); \
- }
-
-static int __videobuf_dc_alloc(struct device *dev,
- struct videobuf_dma_contig_memory *mem,
- unsigned long size, gfp_t flags)
-{
- mem->size = size;
- mem->vaddr = dma_alloc_coherent(dev, mem->size,
- &mem->dma_handle, flags);
-
- if (!mem->vaddr) {
- dev_err(dev, "memory alloc size %ld failed\n", mem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
-
- return 0;
-}
-
-static void __videobuf_dc_free(struct device *dev,
- struct videobuf_dma_contig_memory *mem)
-{
- dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
-
- mem->vaddr = NULL;
-}
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
-
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
- map, map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- map->count++;
- videobuf_queue_unlock(q);
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- int i;
-
- dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
- map, map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- if (!--map->count) {
- struct videobuf_dma_contig_memory *mem;
-
- dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
-
-		/* We first need to cancel the streams before unmapping */
- if (q->streaming)
- videobuf_queue_cancel(q);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
-
- if (q->bufs[i]->map != map)
- continue;
-
- mem = q->bufs[i]->priv;
- if (mem) {
-				/* This callback is called only if the kernel
-				 * has allocated the memory and this memory is
-				 * mmapped. In this case the memory must be
-				 * freed in order to unmap it.
-				 */
-
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-				/* vfree is not atomic - can't be
-				 * called with IRQs disabled
-				 */
- dev_dbg(q->dev, "buf[%d] freeing %p\n",
- i, mem->vaddr);
-
- __videobuf_dc_free(q->dev, mem);
- mem->vaddr = NULL;
- }
-
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- }
-
- kfree(map);
-
- }
- videobuf_queue_unlock(q);
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
-};
-
-/**
- * videobuf_dma_contig_user_put() - reset pointer to user space buffer
- * @mem: per-buffer private videobuf-dma-contig data
- *
- * This function resets the user space pointer
- */
-static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
-{
- mem->dma_handle = 0;
- mem->size = 0;
-}
-
-/**
- * videobuf_dma_contig_user_get() - setup user space memory pointer
- * @mem: per-buffer private videobuf-dma-contig data
- * @vb: video buffer to map
- *
- * This function validates and sets up a pointer to user space memory.
- * Only physically contiguous pfn-mapped memory is accepted.
- *
- * Returns 0 if successful.
- */
-static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
- struct videobuf_buffer *vb)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long prev_pfn, this_pfn;
- unsigned long pages_done, user_address;
- unsigned int offset;
- int ret;
-
- offset = vb->baddr & ~PAGE_MASK;
- mem->size = PAGE_ALIGN(vb->size + offset);
- ret = -EINVAL;
-
- down_read(&mm->mmap_sem);
-
- vma = find_vma(mm, vb->baddr);
- if (!vma)
- goto out_up;
-
- if ((vb->baddr + mem->size) > vma->vm_end)
- goto out_up;
-
- pages_done = 0;
- prev_pfn = 0; /* kill warning */
- user_address = vb->baddr;
-
- while (pages_done < (mem->size >> PAGE_SHIFT)) {
- ret = follow_pfn(vma, user_address, &this_pfn);
- if (ret)
- break;
-
- if (pages_done == 0)
- mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
- else if (this_pfn != (prev_pfn + 1))
- ret = -EFAULT;
-
- if (ret)
- break;
-
- prev_pfn = this_pfn;
- user_address += PAGE_SIZE;
- pages_done++;
- }
-
-out_up:
- up_read(&current->mm->mmap_sem);
-
- return ret;
-}
-
-static struct videobuf_buffer *__videobuf_alloc(size_t size)
-{
- struct videobuf_dma_contig_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (vb) {
- vb->priv = ((char *)vb) + size;
- mem = vb->priv;
- mem->magic = MAGIC_DC_MEM;
- }
-
- return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- return mem->vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- struct videobuf_dma_contig_memory *mem = vb->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
-
- /* All handling should be done by __videobuf_mmap_mapper() */
- if (!mem->vaddr) {
- dev_err(q->dev, "memory is not alloced/mmapped.\n");
- return -EINVAL;
- }
- break;
- case V4L2_MEMORY_USERPTR:
- dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
-
- /* handle pointer from user space */
- if (vb->baddr)
- return videobuf_dma_contig_user_get(mem, vb);
-
- /* allocate memory for the read() method */
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
- GFP_KERNEL))
- return -ENOMEM;
- break;
- case V4L2_MEMORY_OVERLAY:
- default:
- dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_dma_contig_memory *mem;
- struct videobuf_mapping *map;
- int retval;
- unsigned long size;
-
- dev_dbg(q->dev, "%s\n", __func__);
-
- /* create mapping + update buffer list */
- map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- buf->map = map;
- map->q = q;
-
- buf->baddr = vma->vm_start;
-
- mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
- GFP_KERNEL | __GFP_COMP))
- goto error;
-
- /* Try to remap memory */
- size = vma->vm_end - vma->vm_start;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- retval = vm_iomap_memory(vma, vma->vm_start, size);
- if (retval) {
- dev_err(q->dev, "mmap: remap failed with error %d. ",
- retval);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
- goto error;
- }
-
- vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_private_data = map;
-
- dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
- map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize, vma->vm_pgoff, buf->i);
-
- videobuf_vm_open(vma);
-
- return 0;
-
-error:
- kfree(map);
- return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
- .alloc_vb = __videobuf_alloc,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
-};
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- return mem->dma_handle;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
-
-void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
-	/* mmapped memory can't be freed here, otherwise the mmapped region
-	 * would be released while it is still needed. In that case the
-	 * memory release must happen inside videobuf_vm_close().
-	 * So only free the memory here if it was allocated for the read()
-	 * operation.
-	 */
- if (buf->memory != V4L2_MEMORY_USERPTR)
- return;
-
- if (!mem)
- return;
-
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- /* handle user space pointer case */
- if (buf->baddr) {
- videobuf_dma_contig_user_put(mem);
- return;
- }
-
- /* read() method */
- if (mem->vaddr) {
- __videobuf_dc_free(q->dev, mem);
- mem->vaddr = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
deleted file mode 100644
index 9db674ccdc68..000000000000
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ /dev/null
@@ -1,635 +0,0 @@
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware to be able to scatter-gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
- * (c) 2006 Ted Walther and John Sokol
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <media/videobuf-dma-sg.h>
-
-#define MAGIC_DMABUF 0x19721112
-#define MAGIC_SG_MEM 0x17890714
-
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Return a scatterlist for some page-aligned vmalloc()'ed memory
- * block (NULL on errors). Memory for the scatterlist is allocated
- * using vzalloc(); the caller must free it with vfree().
- */
-static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
- int nr_pages)
-{
- struct scatterlist *sglist;
- struct page *pg;
- int i;
-
- sglist = vzalloc(nr_pages * sizeof(*sglist));
- if (NULL == sglist)
- return NULL;
- sg_init_table(sglist, nr_pages);
- for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
- pg = vmalloc_to_page(virt);
- if (NULL == pg)
- goto err;
- BUG_ON(PageHighMem(pg));
- sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
- }
- return sglist;
-
-err:
- vfree(sglist);
- return NULL;
-}
-
-/*
- * Return a scatterlist for an array of userpages (NULL on errors).
- * Memory for the scatterlist is allocated using vmalloc(); the caller
- * must free it with vfree().
- */
-static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
- int nr_pages, int offset, size_t size)
-{
- struct scatterlist *sglist;
- int i;
-
- if (NULL == pages[0])
- return NULL;
- sglist = vmalloc(nr_pages * sizeof(*sglist));
- if (NULL == sglist)
- return NULL;
- sg_init_table(sglist, nr_pages);
-
- if (PageHighMem(pages[0]))
- /* DMA to highmem pages might not work */
- goto highmem;
- sg_set_page(&sglist[0], pages[0],
- min_t(size_t, PAGE_SIZE - offset, size), offset);
- size -= min_t(size_t, PAGE_SIZE - offset, size);
- for (i = 1; i < nr_pages; i++) {
- if (NULL == pages[i])
- goto nopage;
- if (PageHighMem(pages[i]))
- goto highmem;
- sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
- return sglist;
-
-nopage:
- dprintk(2, "sgl: oops - no page\n");
- vfree(sglist);
- return NULL;
-
-highmem:
- dprintk(2, "sgl: oops - highmem page\n");
- vfree(sglist);
- return NULL;
-}
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- return &mem->dma;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-
-void videobuf_dma_init(struct videobuf_dmabuf *dma)
-{
- memset(dma, 0, sizeof(*dma));
- dma->magic = MAGIC_DMABUF;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_init);
-
-static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
- int direction, unsigned long data, unsigned long size)
-{
- unsigned long first, last;
- int err, rw = 0;
-
- dma->direction = direction;
- switch (dma->direction) {
- case DMA_FROM_DEVICE:
- rw = READ;
- break;
- case DMA_TO_DEVICE:
- rw = WRITE;
- break;
- default:
- BUG();
- }
-
- first = (data & PAGE_MASK) >> PAGE_SHIFT;
- last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
- dma->offset = data & ~PAGE_MASK;
- dma->size = size;
- dma->nr_pages = last-first+1;
- dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
- if (NULL == dma->pages)
- return -ENOMEM;
-
- dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
- data, size, dma->nr_pages);
-
- err = get_user_pages(current, current->mm,
- data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
-
- if (err != dma->nr_pages) {
- dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
- return err < 0 ? err : -EINVAL;
- }
- return 0;
-}
-
-int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
- unsigned long data, unsigned long size)
-{
- int ret;
-
- down_read(&current->mm->mmap_sem);
- ret = videobuf_dma_init_user_locked(dma, direction, data, size);
- up_read(&current->mm->mmap_sem);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
-
-int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
- int nr_pages)
-{
- dprintk(1, "init kernel [%d pages]\n", nr_pages);
-
- dma->direction = direction;
- dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == dma->vaddr) {
- dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
- return -ENOMEM;
- }
-
- dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)dma->vaddr,
- nr_pages << PAGE_SHIFT);
-
- memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
- dma->nr_pages = nr_pages;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
-
-int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
- dma_addr_t addr, int nr_pages)
-{
- dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
- nr_pages, (unsigned long)addr);
- dma->direction = direction;
-
- if (0 == addr)
- return -EINVAL;
-
- dma->bus_addr = addr;
- dma->nr_pages = nr_pages;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-
-int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(0 == dma->nr_pages);
-
- if (dma->pages) {
- dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
- dma->offset, dma->size);
- }
- if (dma->vaddr) {
- dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
- dma->nr_pages);
- }
- if (dma->bus_addr) {
- dma->sglist = vmalloc(sizeof(*dma->sglist));
- if (NULL != dma->sglist) {
- dma->sglen = 1;
- sg_dma_address(&dma->sglist[0]) = dma->bus_addr
- & PAGE_MASK;
- dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
- sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
- }
- }
- if (NULL == dma->sglist) {
- dprintk(1, "scatterlist is NULL\n");
- return -ENOMEM;
- }
- if (!dma->bus_addr) {
- dma->sglen = dma_map_sg(dev, dma->sglist,
- dma->nr_pages, dma->direction);
- if (0 == dma->sglen) {
- printk(KERN_WARNING
- "%s: videobuf_map_sg failed\n", __func__);
- vfree(dma->sglist);
- dma->sglist = NULL;
- dma->sglen = 0;
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_map);
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
-
- if (!dma->sglen)
- return 0;
-
- dma_unmap_sg(dev, dma->sglist, dma->sglen, dma->direction);
-
- vfree(dma->sglist);
- dma->sglist = NULL;
- dma->sglen = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-
-int videobuf_dma_free(struct videobuf_dmabuf *dma)
-{
- int i;
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(dma->sglen);
-
- if (dma->pages) {
- for (i = 0; i < dma->nr_pages; i++)
- page_cache_release(dma->pages[i]);
- kfree(dma->pages);
- dma->pages = NULL;
- }
-
- vfree(dma->vaddr);
- dma->vaddr = NULL;
-
- if (dma->bus_addr)
- dma->bus_addr = 0;
- dma->direction = DMA_NONE;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-/* --------------------------------------------------------------------- */
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
-
- dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- map->count++;
- videobuf_queue_unlock(q);
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- struct videobuf_dma_sg_memory *mem;
- int i;
-
- dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- if (!--map->count) {
- dprintk(1, "munmap %p q=%p\n", map, q);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- mem = q->bufs[i]->priv;
- if (!mem)
- continue;
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- if (q->bufs[i]->map != map)
- continue;
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- q->ops->buf_release(q, q->bufs[i]);
- }
- kfree(map);
- }
- videobuf_queue_unlock(q);
- return;
-}
-
-/*
- * Get an anonymous page for the mapping. Make sure we can DMA to that
- * memory location with 32-bit PCI devices (i.e. don't use highmem for
- * now ...). Bounce buffers don't work well at the data rates video
- * capture requires.
- */
-static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
-
- dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,
- vma->vm_start, vma->vm_end);
-
- page = alloc_page(GFP_USER | __GFP_DMA32);
- if (!page)
- return VM_FAULT_OOM;
- clear_user_highpage(page, (unsigned long)vmf->virtual_address);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
- .fault = videobuf_vm_fault,
-};
-
-/* ---------------------------------------------------------------------
- * SG handlers for the generic methods
- */
-
-/* The allocated area consists of 3 parts:
-	struct videobuf_buffer
- struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
- struct videobuf_dma_sg_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
- struct videobuf_dma_sg_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (!vb)
- return vb;
-
- mem = vb->priv = ((char *)vb) + size;
- mem->magic = MAGIC_SG_MEM;
-
- videobuf_dma_init(&mem->dma);
-
- dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
- mem, (long)sizeof(*mem));
-
- return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- return mem->dma.vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- int err, pages;
- dma_addr_t bus;
- struct videobuf_dma_sg_memory *mem = vb->priv;
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- case V4L2_MEMORY_USERPTR:
- if (0 == vb->baddr) {
- /* no userspace addr -- kernel bounce buffer */
- pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_kernel(&mem->dma,
- DMA_FROM_DEVICE,
- pages);
- if (0 != err)
- return err;
- } else if (vb->memory == V4L2_MEMORY_USERPTR) {
- /* dma directly to userspace */
- err = videobuf_dma_init_user(&mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr, vb->bsize);
- if (0 != err)
- return err;
- } else {
-			/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
-			 * buffers can only be called from videobuf_qbuf.
-			 * We take current->mm->mmap_sem there to prevent a
-			 * locking inversion, so don't take it here. */
-
- err = videobuf_dma_init_user_locked(&mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr, vb->bsize);
- if (0 != err)
- return err;
- }
- break;
- case V4L2_MEMORY_OVERLAY:
- if (NULL == fbuf)
- return -EINVAL;
- /* FIXME: need sanity checks for vb->boff */
- /*
- * Using a double cast to avoid compiler warnings when
- * building for PAE. Compiler doesn't like direct casting
- * of a 32 bit ptr to 64 bit integer.
- */
- bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
- pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
- bus, pages);
- if (0 != err)
- return err;
- break;
- default:
- BUG();
- }
- err = videobuf_dma_map(q->dev, &mem->dma);
- if (0 != err)
- return err;
-
- return 0;
-}
-
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem || !mem->dma.sglen);
-
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
- MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
-
- dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
- mem->dma.sglen, mem->dma.direction);
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_dma_sg_memory *mem = buf->priv;
- struct videobuf_mapping *map;
- unsigned int first, last, size = 0, i;
- int retval;
-
- retval = -EINVAL;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (buf == q->bufs[first]) {
- size = PAGE_ALIGN(q->bufs[first]->bsize);
- break;
- }
- }
-
- /* paranoia, should never happen since buf is always valid. */
- if (!size) {
- dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- goto done;
- }
-
- last = first;
-
- /* create mapping + update buffer list */
- retval = -ENOMEM;
- map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (NULL == map)
- goto done;
-
- size = 0;
- for (i = first; i <= last; i++) {
- if (NULL == q->bufs[i])
- continue;
- q->bufs[i]->map = map;
- q->bufs[i]->baddr = vma->vm_start + size;
- size += PAGE_ALIGN(q->bufs[i]->bsize);
- }
-
- map->count = 1;
- map->q = q;
- vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
- vma->vm_private_data = map;
- dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
- map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
- retval = 0;
-
-done:
- return retval;
-}
-
-static struct videobuf_qtype_ops sg_ops = {
- .magic = MAGIC_QTYPE_OPS,
-
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
-};
-
-void *videobuf_sg_alloc(size_t size)
-{
- struct videobuf_queue q;
-
-	/* Required to make the generic handler call __videobuf_alloc_vb() */
- q.int_ops = &sg_ops;
-
- q.msize = size;
-
- return videobuf_alloc_vb(&q);
-}
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &sg_ops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-
diff --git a/drivers/media/v4l2-core/videobuf-dvb.c b/drivers/media/v4l2-core/videobuf-dvb.c
deleted file mode 100644
index b7efa4516d36..000000000000
--- a/drivers/media/v4l2-core/videobuf-dvb.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- *
- * some helper functions for simple DVB cards that simply DMA the
- * complete transport stream and let the computer sort everything else
- * (i.e. we are using the software demux, ...). Also uses
- * video-buf to manage the DMA buffers.
- *
- * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/kthread.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-
-#include <linux/freezer.h>
-
-#include <media/videobuf-core.h>
-#include <media/videobuf-dvb.h>
-
-/* ------------------------------------------------------------------ */
-
-MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
-MODULE_LICENSE("GPL");
-
-static unsigned int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"enable debug messages");
-
-#define dprintk(fmt, arg...) if (debug) \
- printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name , ## arg)
-
-/* ------------------------------------------------------------------ */
-
-static int videobuf_dvb_thread(void *data)
-{
- struct videobuf_dvb *dvb = data;
- struct videobuf_buffer *buf;
- unsigned long flags;
- void *outp;
-
- dprintk("dvb thread started\n");
- set_freezable();
- videobuf_read_start(&dvb->dvbq);
-
- for (;;) {
- /* fetch next buffer */
- buf = list_entry(dvb->dvbq.stream.next,
- struct videobuf_buffer, stream);
- list_del(&buf->stream);
- videobuf_waiton(&dvb->dvbq, buf, 0, 1);
-
- /* no more feeds left or stop_feed() asked us to quit */
- if (0 == dvb->nfeeds)
- break;
- if (kthread_should_stop())
- break;
- try_to_freeze();
-
- /* feed buffer data to demux */
- outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
-
- if (buf->state == VIDEOBUF_DONE)
- dvb_dmx_swfilter(&dvb->demux, outp,
- buf->size);
-
- /* requeue buffer */
- list_add_tail(&buf->stream,&dvb->dvbq.stream);
- spin_lock_irqsave(dvb->dvbq.irqlock,flags);
- dvb->dvbq.ops->buf_queue(&dvb->dvbq,buf);
- spin_unlock_irqrestore(dvb->dvbq.irqlock,flags);
- }
-
- videobuf_read_stop(&dvb->dvbq);
- dprintk("dvb thread stopped\n");
-
- /* Hmm, linux becomes *very* unhappy without this ... */
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- return 0;
-}
-
-static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
-{
- struct dvb_demux *demux = feed->demux;
- struct videobuf_dvb *dvb = demux->priv;
- int rc;
-
- if (!demux->dmx.frontend)
- return -EINVAL;
-
- mutex_lock(&dvb->lock);
- dvb->nfeeds++;
- rc = dvb->nfeeds;
-
- if (NULL != dvb->thread)
- goto out;
- dvb->thread = kthread_run(videobuf_dvb_thread,
- dvb, "%s dvb", dvb->name);
- if (IS_ERR(dvb->thread)) {
- rc = PTR_ERR(dvb->thread);
- dvb->thread = NULL;
- }
-
-out:
- mutex_unlock(&dvb->lock);
- return rc;
-}
-
-static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
-{
- struct dvb_demux *demux = feed->demux;
- struct videobuf_dvb *dvb = demux->priv;
- int err = 0;
-
- mutex_lock(&dvb->lock);
- dvb->nfeeds--;
- if (0 == dvb->nfeeds && NULL != dvb->thread) {
- err = kthread_stop(dvb->thread);
- dvb->thread = NULL;
- }
- mutex_unlock(&dvb->lock);
- return err;
-}
-
-static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
- struct module *module,
- void *adapter_priv,
- struct device *device,
- char *adapter_name,
- short *adapter_nr,
- int mfe_shared)
-{
- int result;
-
- mutex_init(&fe->lock);
-
- /* register adapter */
- result = dvb_register_adapter(&fe->adapter, adapter_name, module,
- device, adapter_nr);
- if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
- adapter_name, result);
- }
- fe->adapter.priv = adapter_priv;
- fe->adapter.mfe_shared = mfe_shared;
-
- return result;
-}
-
-static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
- struct videobuf_dvb *dvb)
-{
- int result;
-
- /* register frontend */
- result = dvb_register_frontend(adapter, dvb->frontend);
- if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
- dvb->name, result);
- goto fail_frontend;
- }
-
- /* register demux stuff */
- dvb->demux.dmx.capabilities =
- DMX_TS_FILTERING | DMX_SECTION_FILTERING |
- DMX_MEMORY_BASED_FILTERING;
- dvb->demux.priv = dvb;
- dvb->demux.filternum = 256;
- dvb->demux.feednum = 256;
- dvb->demux.start_feed = videobuf_dvb_start_feed;
- dvb->demux.stop_feed = videobuf_dvb_stop_feed;
- result = dvb_dmx_init(&dvb->demux);
- if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
- dvb->name, result);
- goto fail_dmx;
- }
-
- dvb->dmxdev.filternum = 256;
- dvb->dmxdev.demux = &dvb->demux.dmx;
- dvb->dmxdev.capabilities = 0;
- result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
-
- if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
- dvb->name, result);
- goto fail_dmxdev;
- }
-
- dvb->fe_hw.source = DMX_FRONTEND_0;
- result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
- if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
- dvb->name, result);
- goto fail_fe_hw;
- }
-
- dvb->fe_mem.source = DMX_MEMORY_FE;
- result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
- if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
- dvb->name, result);
- goto fail_fe_mem;
- }
-
- result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
- if (result < 0) {
- printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
- dvb->name, result);
- goto fail_fe_conn;
- }
-
- /* register network adapter */
- result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
- if (result < 0) {
- printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
- dvb->name, result);
- goto fail_fe_conn;
- }
- return 0;
-
-fail_fe_conn:
- dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
-fail_fe_mem:
- dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
-fail_fe_hw:
- dvb_dmxdev_release(&dvb->dmxdev);
-fail_dmxdev:
- dvb_dmx_release(&dvb->demux);
-fail_dmx:
- dvb_unregister_frontend(dvb->frontend);
-fail_frontend:
- dvb_frontend_detach(dvb->frontend);
- dvb->frontend = NULL;
-
- return result;
-}
-
-/* ------------------------------------------------------------------ */
-/* Register a single adapter and one or more frontends */
-int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
- struct module *module,
- void *adapter_priv,
- struct device *device,
- short *adapter_nr,
- int mfe_shared)
-{
- struct list_head *list, *q;
- struct videobuf_dvb_frontend *fe;
- int res;
-
- fe = videobuf_dvb_get_frontend(f, 1);
- if (!fe) {
- printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
- return -EINVAL;
- }
-
- /* Bring up the adapter */
- res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
- fe->dvb.name, adapter_nr, mfe_shared);
- if (res < 0) {
- printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
- return res;
- }
-
- /* Attach all of the frontends to the adapter */
- mutex_lock(&f->lock);
- list_for_each_safe(list, q, &f->felist) {
- fe = list_entry(list, struct videobuf_dvb_frontend, felist);
- res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
- if (res < 0) {
- printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
- fe->dvb.name, res);
- goto err;
- }
- }
- mutex_unlock(&f->lock);
- return 0;
-
-err:
- mutex_unlock(&f->lock);
- videobuf_dvb_unregister_bus(f);
- return res;
-}
-EXPORT_SYMBOL(videobuf_dvb_register_bus);
-
-void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
-{
- videobuf_dvb_dealloc_frontends(f);
-
- dvb_unregister_adapter(&f->adapter);
-}
-EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
-
-struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
- struct videobuf_dvb_frontends *f, int id)
-{
- struct list_head *list, *q;
- struct videobuf_dvb_frontend *fe, *ret = NULL;
-
- mutex_lock(&f->lock);
-
- list_for_each_safe(list, q, &f->felist) {
- fe = list_entry(list, struct videobuf_dvb_frontend, felist);
- if (fe->id == id) {
- ret = fe;
- break;
- }
- }
-
- mutex_unlock(&f->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(videobuf_dvb_get_frontend);
-
-int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
- struct dvb_frontend *p)
-{
- struct list_head *list, *q;
- struct videobuf_dvb_frontend *fe = NULL;
- int ret = 0;
-
- mutex_lock(&f->lock);
-
- list_for_each_safe(list, q, &f->felist) {
- fe = list_entry(list, struct videobuf_dvb_frontend, felist);
- if (fe->dvb.frontend == p) {
- ret = fe->id;
- break;
- }
- }
-
- mutex_unlock(&f->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(videobuf_dvb_find_frontend);
-
-struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
- struct videobuf_dvb_frontends *f, int id)
-{
- struct videobuf_dvb_frontend *fe;
-
- fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
- if (fe == NULL)
- goto fail_alloc;
-
- fe->id = id;
- mutex_init(&fe->dvb.lock);
-
- mutex_lock(&f->lock);
- list_add_tail(&fe->felist, &f->felist);
- mutex_unlock(&f->lock);
-
-fail_alloc:
- return fe;
-}
-EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
-
-void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
-{
- struct list_head *list, *q;
- struct videobuf_dvb_frontend *fe;
-
- mutex_lock(&f->lock);
- list_for_each_safe(list, q, &f->felist) {
- fe = list_entry(list, struct videobuf_dvb_frontend, felist);
- if (fe->dvb.net.dvbdev) {
- dvb_net_release(&fe->dvb.net);
- fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
- &fe->dvb.fe_mem);
- fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
- &fe->dvb.fe_hw);
- dvb_dmxdev_release(&fe->dvb.dmxdev);
- dvb_dmx_release(&fe->dvb.demux);
- dvb_unregister_frontend(fe->dvb.frontend);
- }
- if (fe->dvb.frontend)
- /* always allocated, may have been reset */
- dvb_frontend_detach(fe->dvb.frontend);
- list_del(list); /* remove list entry */
- kfree(fe); /* free frontend allocation */
- }
- mutex_unlock(&f->lock);
-}
-EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
deleted file mode 100644
index 1365c651c177..000000000000
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * helper functions for vmalloc video4linux capture buffers
- *
- * The functions expect the hardware to be able to scatter-gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <media/videobuf-vmalloc.h>
-
-#define MAGIC_DMABUF 0x17760309
-#define MAGIC_VMAL_MEM 0x18221223
-
-#define MAGIC_CHECK(is, should) \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
- is, should); \
- BUG(); \
- }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...) \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
-
-
-/***************************************************************************/
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
-
- dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- map->count++;
- videobuf_queue_unlock(q);
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
- struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- int i;
-
- dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
- map->count, vma->vm_start, vma->vm_end);
-
- videobuf_queue_lock(q);
- if (!--map->count) {
- struct videobuf_vmalloc_memory *mem;
-
- dprintk(1, "munmap %p q=%p\n", map, q);
-
-		/* We first need to cancel the streams before unmapping */
- if (q->streaming)
- videobuf_queue_cancel(q);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
-
- if (q->bufs[i]->map != map)
- continue;
-
- mem = q->bufs[i]->priv;
- if (mem) {
-				/* This callback is called only if the kernel
-				 * has allocated the memory and this memory is
-				 * mmapped. In this case the memory must be
-				 * freed in order to unmap it.
-				 */
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-				/* vfree is not atomic - can't be
-				 * called with IRQs disabled
-				 */
- dprintk(1, "%s: buf[%d] freeing (%p)\n",
- __func__, i, mem->vaddr);
-
- vfree(mem->vaddr);
- mem->vaddr = NULL;
- }
-
- q->bufs[i]->map = NULL;
- q->bufs[i]->baddr = 0;
- }
-
- kfree(map);
-
- }
- videobuf_queue_unlock(q);
-
- return;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
-};
-
-/* ---------------------------------------------------------------------
- * vmalloc handlers for the generic methods
- */
-
-/* The allocated area consists of 3 parts:
-	struct videobuf_buffer
-	struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
-	struct videobuf_vmalloc_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
- struct videobuf_vmalloc_memory *mem;
- struct videobuf_buffer *vb;
-
- vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
- if (!vb)
- return vb;
-
- mem = vb->priv = ((char *)vb) + size;
- mem->magic = MAGIC_VMAL_MEM;
-
- dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
- mem, (long)sizeof(*mem));
-
- return vb;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
-{
- struct videobuf_vmalloc_memory *mem = vb->priv;
- int pages;
-
- BUG_ON(!mem);
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- switch (vb->memory) {
- case V4L2_MEMORY_MMAP:
- dprintk(1, "%s memory method MMAP\n", __func__);
-
- /* All handling should be done by __videobuf_mmap_mapper() */
- if (!mem->vaddr) {
- printk(KERN_ERR "memory is not alloced/mmapped.\n");
- return -EINVAL;
- }
- break;
- case V4L2_MEMORY_USERPTR:
- pages = PAGE_ALIGN(vb->size);
-
- dprintk(1, "%s memory method USERPTR\n", __func__);
-
- if (vb->baddr) {
- printk(KERN_ERR "USERPTR is currently not supported\n");
- return -EINVAL;
- }
-
- /* The only USERPTR currently supported is the one needed for
- * the read() method.
- */
-
- mem->vaddr = vmalloc_user(pages);
- if (!mem->vaddr) {
- printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
- return -ENOMEM;
- }
- dprintk(1, "vmalloc is at addr %p (%d pages)\n",
- mem->vaddr, pages);
-
-#if 0
- int rc;
- /* Kernel userptr is used also by read() method. In this case,
- there's no need to remap, since data will be copied to user
- */
- if (!vb->baddr)
- return 0;
-
- /* FIXME: to properly support USERPTR, remap should occur.
- The code below won't work, since mem->vma = NULL
- */
- /* Try to remap memory */
- rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
- if (rc < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d", rc);
- return -ENOMEM;
- }
-#endif
-
- break;
- case V4L2_MEMORY_OVERLAY:
- default:
- dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
-
- /* V4L2_MEMORY_OVERLAY is currently not supported */
- printk(KERN_ERR "Memory method currently unsupported.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma)
-{
- struct videobuf_vmalloc_memory *mem;
- struct videobuf_mapping *map;
- int retval, pages;
-
- dprintk(1, "%s\n", __func__);
-
- /* create mapping + update buffer list */
- map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
- if (NULL == map)
- return -ENOMEM;
-
- buf->map = map;
- map->q = q;
-
- buf->baddr = vma->vm_start;
-
- mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
- mem->vaddr = vmalloc_user(pages);
- if (!mem->vaddr) {
- printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
- goto error;
- }
- dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
-
- /* Try to remap memory */
- retval = remap_vmalloc_range(vma, mem->vaddr, 0);
- if (retval < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
- vfree(mem->vaddr);
- goto error;
- }
-
- vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = map;
-
- dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
- map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize,
- vma->vm_pgoff, buf->i);
-
- videobuf_vm_open(vma);
-
- return 0;
-
-error:
- mem = NULL;
- kfree(map);
- return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
-
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = videobuf_to_vmalloc,
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
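/*
 * Editor's sketch (not part of the original file): how a driver might set up
 * a vmalloc-backed videobuf queue with the helper above. All "foo" names, the
 * ops table and the locks are hypothetical; foo_video_qops is assumed to be
 * defined elsewhere with the driver's buf_setup/buf_prepare/buf_queue/
 * buf_release callbacks.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/videobuf-vmalloc.h>

struct foo_stream {
	struct videobuf_queue vb_q;	/* videobuf queue for this stream */
	spinlock_t slock;		/* irqlock passed to videobuf */
	struct mutex lock;		/* serializes the ioctls (ext_lock) */
};

extern const struct videobuf_queue_ops foo_video_qops;	/* assumed defined */

static void foo_init_vb_queue(struct foo_stream *s, struct device *dev)
{
	videobuf_queue_vmalloc_init(&s->vb_q, &foo_video_qops, dev,
				    &s->slock,
				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
				    V4L2_FIELD_INTERLACED,
				    sizeof(struct videobuf_buffer),
				    s, &s->lock);
}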
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
-{
- struct videobuf_vmalloc_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- return mem->vaddr;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf)
-{
- struct videobuf_vmalloc_memory *mem = buf->priv;
-
- /* mmapped memory can't be freed here, otherwise the mmapped region
- would be released while still needed. In that case, the memory
- release happens inside videobuf_vm_close().
- So, free the memory here only if it was allocated for the
- read() operation.
- */
- if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
- return;
-
- if (!mem)
- return;
-
- MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
- vfree(mem->vaddr);
- mem->vaddr = NULL;
-
- return;
-}
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
deleted file mode 100644
index 9fc4bab2da97..000000000000
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ /dev/null
@@ -1,2710 +0,0 @@
-/*
- * videobuf2-core.c - V4L2 driver helper framework
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Pawel Osciak <pawel@osciak.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-
-#include <media/v4l2-dev.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf2-core.h>
-
-static int debug;
-module_param(debug, int, 0644);
-
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vb2: " fmt, ## arg); \
- } while (0)
-
-#define call_memop(q, op, args...) \
- (((q)->mem_ops->op) ? \
- ((q)->mem_ops->op(args)) : 0)
-
-#define call_qop(q, op, args...) \
- (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
-
-#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
- V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
- V4L2_BUF_FLAG_PREPARED | \
- V4L2_BUF_FLAG_TIMESTAMP_MASK)
-
-/**
- * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
- */
-static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- void *mem_priv;
- int plane;
-
- /*
- * Allocate memory for all planes in this buffer
- * NOTE: mmapped areas should be page aligned
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
-
- mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
- size, q->gfp_flags);
- if (IS_ERR_OR_NULL(mem_priv))
- goto free;
-
- /* Associate allocator private data with this plane */
- vb->planes[plane].mem_priv = mem_priv;
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
- }
-
- return 0;
-free:
- /* Free already allocated memory if one of the allocations failed */
- for (; plane > 0; --plane) {
- call_memop(q, put, vb->planes[plane - 1].mem_priv);
- vb->planes[plane - 1].mem_priv = NULL;
- }
-
- return -ENOMEM;
-}
-
-/**
- * __vb2_buf_mem_free() - free memory of the given buffer
- */
-static void __vb2_buf_mem_free(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned int plane;
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- call_memop(q, put, vb->planes[plane].mem_priv);
- vb->planes[plane].mem_priv = NULL;
- dprintk(3, "Freed plane %d of buffer %d\n", plane,
- vb->v4l2_buf.index);
- }
-}
-
-/**
- * __vb2_buf_userptr_put() - release userspace memory associated with
- * a USERPTR buffer
- */
-static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned int plane;
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
- vb->planes[plane].mem_priv = NULL;
- }
-}
-
-/**
- * __vb2_plane_dmabuf_put() - release memory associated with
- * a DMABUF shared plane
- */
-static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
-{
- if (!p->mem_priv)
- return;
-
- if (p->dbuf_mapped)
- call_memop(q, unmap_dmabuf, p->mem_priv);
-
- call_memop(q, detach_dmabuf, p->mem_priv);
- dma_buf_put(p->dbuf);
- memset(p, 0, sizeof(*p));
-}
-
-/**
- * __vb2_buf_dmabuf_put() - release memory associated with
- * a DMABUF shared buffer
- */
-static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned int plane;
-
- for (plane = 0; plane < vb->num_planes; ++plane)
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
-}
-
-/**
- * __setup_offsets() - setup unique offsets ("cookies") for every plane in
- * every buffer on the queue
- */
-static void __setup_offsets(struct vb2_queue *q, unsigned int n)
-{
- unsigned int buffer, plane;
- struct vb2_buffer *vb;
- unsigned long off;
-
- if (q->num_buffers) {
- struct v4l2_plane *p;
- vb = q->bufs[q->num_buffers - 1];
- p = &vb->v4l2_planes[vb->num_planes - 1];
- off = PAGE_ALIGN(p->m.mem_offset + p->length);
- } else {
- off = 0;
- }
-
- for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
- vb = q->bufs[buffer];
- if (!vb)
- continue;
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
- vb->v4l2_planes[plane].m.mem_offset = off;
-
- dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
- buffer, plane, off);
-
- off += vb->v4l2_planes[plane].length;
- off = PAGE_ALIGN(off);
- }
- }
-}
-
-/**
- * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
- * video buffer memory for all buffers/planes on the queue and initializes the
- * queue
- *
- * Returns the number of buffers successfully allocated.
- */
-static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
- unsigned int num_buffers, unsigned int num_planes)
-{
- unsigned int buffer;
- struct vb2_buffer *vb;
- int ret;
-
- for (buffer = 0; buffer < num_buffers; ++buffer) {
- /* Allocate videobuf buffer structures */
- vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
- if (!vb) {
- dprintk(1, "Memory alloc for buffer struct failed\n");
- break;
- }
-
- /* Length stores number of planes for multiplanar buffers */
- if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
- vb->v4l2_buf.length = num_planes;
-
- vb->state = VB2_BUF_STATE_DEQUEUED;
- vb->vb2_queue = q;
- vb->num_planes = num_planes;
- vb->v4l2_buf.index = q->num_buffers + buffer;
- vb->v4l2_buf.type = q->type;
- vb->v4l2_buf.memory = memory;
-
- /* Allocate video buffer memory for the MMAP type */
- if (memory == V4L2_MEMORY_MMAP) {
- ret = __vb2_buf_mem_alloc(vb);
- if (ret) {
- dprintk(1, "Failed allocating memory for "
- "buffer %d\n", buffer);
- kfree(vb);
- break;
- }
- /*
- * Call the driver-provided buffer initialization
- * callback, if given. An error in initialization
- * results in queue setup failure.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "Buffer %d %p initialization"
- " failed\n", buffer, vb);
- __vb2_buf_mem_free(vb);
- kfree(vb);
- break;
- }
- }
-
- q->bufs[q->num_buffers + buffer] = vb;
- }
-
- __setup_offsets(q, buffer);
-
- dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
- buffer, num_planes);
-
- return buffer;
-}
-
-/**
- * __vb2_free_mem() - release all video buffer memory for a given queue
- */
-static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
-{
- unsigned int buffer;
- struct vb2_buffer *vb;
-
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
- ++buffer) {
- vb = q->bufs[buffer];
- if (!vb)
- continue;
-
- /* Free MMAP buffers or release USERPTR buffers */
- if (q->memory == V4L2_MEMORY_MMAP)
- __vb2_buf_mem_free(vb);
- else if (q->memory == V4L2_MEMORY_DMABUF)
- __vb2_buf_dmabuf_put(vb);
- else
- __vb2_buf_userptr_put(vb);
- }
-}
-
-/**
- * __vb2_queue_free() - free buffers at the end of the queue - video memory and
- * related information; if no buffers are left, return the queue to an
- * uninitialized state. Might be called even if the queue has already been freed.
- */
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
-{
- unsigned int buffer;
-
- /* Call driver-provided cleanup function for each buffer, if provided */
- if (q->ops->buf_cleanup) {
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
- ++buffer) {
- if (NULL == q->bufs[buffer])
- continue;
- q->ops->buf_cleanup(q->bufs[buffer]);
- }
- }
-
- /* Release video buffer memory */
- __vb2_free_mem(q, buffers);
-
- /* Free videobuf buffers */
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
- ++buffer) {
- kfree(q->bufs[buffer]);
- q->bufs[buffer] = NULL;
- }
-
- q->num_buffers -= buffers;
- if (!q->num_buffers)
- q->memory = 0;
- INIT_LIST_HEAD(&q->queued_list);
-}
-
-/**
- * __verify_planes_array() - verify that the planes array passed in struct
- * v4l2_buffer from userspace can be safely used
- */
-static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
- return 0;
-
- /* Is memory for copying plane information present? */
- if (NULL == b->m.planes) {
- dprintk(1, "Multi-planar buffer passed but "
- "planes array not provided\n");
- return -EINVAL;
- }
-
- if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
- dprintk(1, "Incorrect planes array length, "
- "expected %d, got %d\n", vb->num_planes, b->length);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * __buffer_in_use() - return true if the buffer is in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
- */
-static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
-{
- unsigned int plane;
- for (plane = 0; plane < vb->num_planes; ++plane) {
- void *mem_priv = vb->planes[plane].mem_priv;
- /*
- * If num_users() has not been provided, call_memop
- * will return 0, apparently nobody cares about this
- * case anyway. If num_users() returns more than 1,
- * we are not the only user of the plane's memory.
- */
- if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
- return true;
- }
- return false;
-}
-
-/**
- * __buffers_in_use() - return true if any buffers on the queue are in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
- */
-static bool __buffers_in_use(struct vb2_queue *q)
-{
- unsigned int buffer;
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- if (__buffer_in_use(q, q->bufs[buffer]))
- return true;
- }
- return false;
-}
-
-/**
- * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
- * returned to userspace
- */
-static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
-{
- struct vb2_queue *q = vb->vb2_queue;
-
- /* Copy back data such as timestamp, flags, etc. */
- memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
- b->reserved2 = vb->v4l2_buf.reserved2;
- b->reserved = vb->v4l2_buf.reserved;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
- /*
- * Fill in plane-related data if userspace provided an array
- * for it. The caller has already verified memory and size.
- */
- b->length = vb->num_planes;
- memcpy(b->m.planes, vb->v4l2_planes,
- b->length * sizeof(struct v4l2_plane));
- } else {
- /*
- * We use length and offset in v4l2_planes array even for
- * single-planar buffers, but userspace does not.
- */
- b->length = vb->v4l2_planes[0].length;
- b->bytesused = vb->v4l2_planes[0].bytesused;
- if (q->memory == V4L2_MEMORY_MMAP)
- b->m.offset = vb->v4l2_planes[0].m.mem_offset;
- else if (q->memory == V4L2_MEMORY_USERPTR)
- b->m.userptr = vb->v4l2_planes[0].m.userptr;
- else if (q->memory == V4L2_MEMORY_DMABUF)
- b->m.fd = vb->v4l2_planes[0].m.fd;
- }
-
- /*
- * Clear any buffer state related flags.
- */
- b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
- b->flags |= q->timestamp_type;
-
- switch (vb->state) {
- case VB2_BUF_STATE_QUEUED:
- case VB2_BUF_STATE_ACTIVE:
- b->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case VB2_BUF_STATE_ERROR:
- b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
- case VB2_BUF_STATE_DONE:
- b->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case VB2_BUF_STATE_PREPARED:
- b->flags |= V4L2_BUF_FLAG_PREPARED;
- break;
- case VB2_BUF_STATE_DEQUEUED:
- /* nothing */
- break;
- }
-
- if (__buffer_in_use(q, vb))
- b->flags |= V4L2_BUF_FLAG_MAPPED;
-}
-
-/**
- * vb2_querybuf() - query video buffer information
- * @q: videobuf queue
- * @b: buffer struct passed from userspace to vidioc_querybuf handler
- * in driver
- *
- * Should be called from vidioc_querybuf ioctl handler in driver.
- * This function will verify the passed v4l2_buffer structure and fill the
- * relevant information for the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_querybuf handler in driver.
- */
-int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- struct vb2_buffer *vb;
- int ret;
-
- if (b->type != q->type) {
- dprintk(1, "querybuf: wrong buffer type\n");
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "querybuf: buffer index out of range\n");
- return -EINVAL;
- }
- vb = q->bufs[b->index];
- ret = __verify_planes_array(vb, b);
- if (!ret)
- __fill_v4l2_buffer(vb, b);
- return ret;
-}
-EXPORT_SYMBOL(vb2_querybuf);
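/*
 * Editor's sketch (not part of the original file): a minimal vidioc_querybuf
 * handler that forwards to vb2_querybuf(), as the comment above suggests.
 * The "foo_dev" structure is hypothetical; later sketches in this file reuse
 * it. video_drvdata() is assumed to have been set up via video_set_drvdata().
 */
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>

struct foo_dev {
	struct vb2_queue queue;		/* vb2 queue embedded in the device state */
	struct vb2_buffer *active;	/* buffer currently being filled (see ISR sketch) */
};

static int foo_vidioc_querybuf(struct file *file, void *priv,
			       struct v4l2_buffer *b)
{
	struct foo_dev *dev = video_drvdata(file);

	/* vb2 validates index/type and fills in the buffer information */
	return vb2_querybuf(&dev->queue, b);
}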
-
-/**
- * __verify_userptr_ops() - verify that all memory operations required for
- * USERPTR queue type have been provided
- */
-static int __verify_userptr_ops(struct vb2_queue *q)
-{
- if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
- !q->mem_ops->put_userptr)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * __verify_mmap_ops() - verify that all memory operations required for
- * MMAP queue type have been provided
- */
-static int __verify_mmap_ops(struct vb2_queue *q)
-{
- if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
- !q->mem_ops->put || !q->mem_ops->mmap)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * __verify_dmabuf_ops() - verify that all memory operations required for
- * DMABUF queue type have been provided
- */
-static int __verify_dmabuf_ops(struct vb2_queue *q)
-{
- if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
- !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
- !q->mem_ops->unmap_dmabuf)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * __verify_memory_type() - Check whether the memory type and buffer type
- * passed to a buffer operation are compatible with the queue.
- */
-static int __verify_memory_type(struct vb2_queue *q,
- enum v4l2_memory memory, enum v4l2_buf_type type)
-{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
- memory != V4L2_MEMORY_DMABUF) {
- dprintk(1, "reqbufs: unsupported memory type\n");
- return -EINVAL;
- }
-
- if (type != q->type) {
- dprintk(1, "reqbufs: requested type is incorrect\n");
- return -EINVAL;
- }
-
- /*
- * Make sure all the required memory ops for given memory type
- * are available.
- */
- if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
- dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
- return -EINVAL;
- }
-
- if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
- dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
- return -EINVAL;
- }
-
- if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
- dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
- return -EINVAL;
- }
-
- /*
- * Place the busy tests at the end: -EBUSY can be ignored when
- * create_bufs is called with count == 0, but count == 0 should still
- * do the memory and type validation.
- */
- if (q->fileio) {
- dprintk(1, "reqbufs: file io in progress\n");
- return -EBUSY;
- }
- return 0;
-}
-
-/**
- * __reqbufs() - Initiate streaming
- * @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler in driver
- *
- * Should be called from vidioc_reqbufs ioctl handler of a driver.
- * This function:
- * 1) verifies streaming parameters passed from the userspace,
- * 2) sets up the queue,
- * 3) negotiates number of buffers and planes per buffer with the driver
- * to be used during streaming,
- * 4) allocates internal buffer structures (struct vb2_buffer), according to
- * the agreed parameters,
- * 5) for MMAP memory type, allocates actual video memory, using the
- * memory handling/allocation routines provided during queue initialization
- *
- * If req->count is 0, all the memory will be freed instead.
- * If the queue has been allocated previously (by a previous vb2_reqbufs call)
- * and the queue is not busy, memory will be reallocated.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_reqbufs handler in driver.
- */
-static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-{
- unsigned int num_buffers, allocated_buffers, num_planes = 0;
- int ret;
-
- if (q->streaming) {
- dprintk(1, "reqbufs: streaming active\n");
- return -EBUSY;
- }
-
- if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
- /*
- * We already have buffers allocated, so first check if they
- * are not in use and can be freed.
- */
- if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
- dprintk(1, "reqbufs: memory in use, cannot free\n");
- return -EBUSY;
- }
-
- __vb2_queue_free(q, q->num_buffers);
-
- /*
- * In case of REQBUFS(0) return immediately without calling
- * driver's queue_setup() callback and allocating resources.
- */
- if (req->count == 0)
- return 0;
- }
-
- /*
- * Make sure the requested values and current defaults are sane.
- */
- num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
- memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
- memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
- q->memory = req->memory;
-
- /*
- * Ask the driver how many buffers and planes per buffer it requires.
- * Driver also sets the size and allocator context for each plane.
- */
- ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
- q->plane_sizes, q->alloc_ctx);
- if (ret)
- return ret;
-
- /* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
- if (ret == 0) {
- dprintk(1, "Memory allocation failed\n");
- return -ENOMEM;
- }
-
- allocated_buffers = ret;
-
- /*
- * Check if driver can handle the allocated number of buffers.
- */
- if (allocated_buffers < num_buffers) {
- num_buffers = allocated_buffers;
-
- ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
- &num_planes, q->plane_sizes, q->alloc_ctx);
-
- if (!ret && allocated_buffers < num_buffers)
- ret = -ENOMEM;
-
- /*
- * Either the driver has accepted a smaller number of buffers,
- * or .queue_setup() returned an error
- */
- }
-
- q->num_buffers = allocated_buffers;
-
- if (ret < 0) {
- __vb2_queue_free(q, allocated_buffers);
- return ret;
- }
-
- /*
- * Return the number of successfully allocated buffers
- * to the userspace.
- */
- req->count = allocated_buffers;
-
- return 0;
-}
-
-/**
- * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
- * type values.
- * @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler in driver
- */
-int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
-{
- int ret = __verify_memory_type(q, req->memory, req->type);
-
- return ret ? ret : __reqbufs(q, req);
-}
-EXPORT_SYMBOL_GPL(vb2_reqbufs);
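/*
 * Editor's sketch (not part of the original file): a vidioc_reqbufs handler
 * whose return value is passed straight back to userspace, reusing the
 * hypothetical foo_dev from the earlier sketch.
 */
static int foo_vidioc_reqbufs(struct file *file, void *priv,
			      struct v4l2_requestbuffers *req)
{
	struct foo_dev *dev = video_drvdata(file);

	/* REQBUFS(0) frees the buffers, anything else (re)allocates them */
	return vb2_reqbufs(&dev->queue, req);
}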
-
-/**
- * __create_bufs() - Allocate buffers and any required auxiliary structs
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
- *
- * Should be called from vidioc_create_bufs ioctl handler of a driver.
- * This function:
- * 1) verifies parameter sanity
- * 2) calls the .queue_setup() queue operation
- * 3) performs any necessary memory allocations
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_create_bufs handler in driver.
- */
-static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-{
- unsigned int num_planes = 0, num_buffers, allocated_buffers;
- int ret;
-
- if (q->num_buffers == VIDEO_MAX_FRAME) {
- dprintk(1, "%s(): maximum number of buffers already allocated\n",
- __func__);
- return -ENOBUFS;
- }
-
- if (!q->num_buffers) {
- memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
- memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
- q->memory = create->memory;
- }
-
- num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
-
- /*
- * Ask the driver whether the requested number of buffers, planes per
- * buffer and their sizes are acceptable
- */
- ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
- &num_planes, q->plane_sizes, q->alloc_ctx);
- if (ret)
- return ret;
-
- /* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, create->memory, num_buffers,
- num_planes);
- if (ret == 0) {
- dprintk(1, "Memory allocation failed\n");
- return -ENOMEM;
- }
-
- allocated_buffers = ret;
-
- /*
- * Check if the driver can handle the number of buffers allocated so far.
- */
- if (ret < num_buffers) {
- num_buffers = ret;
-
- /*
- * q->num_buffers contains the total number of buffers that the
- * queue driver has set up
- */
- ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
- &num_planes, q->plane_sizes, q->alloc_ctx);
-
- if (!ret && allocated_buffers < num_buffers)
- ret = -ENOMEM;
-
- /*
- * Either the driver has accepted a smaller number of buffers,
- * or .queue_setup() returned an error
- */
- }
-
- q->num_buffers += allocated_buffers;
-
- if (ret < 0) {
- __vb2_queue_free(q, allocated_buffers);
- return -ENOMEM;
- }
-
- /*
- * Return the number of successfully allocated buffers
- * to the userspace.
- */
- create->count = allocated_buffers;
-
- return 0;
-}
-
-/**
- * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
- * memory and type values.
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
- */
-int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
-{
- int ret = __verify_memory_type(q, create->memory, create->format.type);
-
- create->index = q->num_buffers;
- if (create->count == 0)
- return ret != -EBUSY ? ret : 0;
- return ret ? ret : __create_bufs(q, create);
-}
-EXPORT_SYMBOL_GPL(vb2_create_bufs);
-
-/**
- * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
- * @vb: vb2_buffer to which the plane in question belongs
- * @plane_no: plane number for which the address is to be returned
- *
- * This function returns a kernel virtual address of a given plane if
- * such a mapping exists, NULL otherwise.
- */
-void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
-{
- struct vb2_queue *q = vb->vb2_queue;
-
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
- return NULL;
-
- return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
-
-}
-EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
-
-/**
- * vb2_plane_cookie() - Return allocator specific cookie for the given plane
- * @vb: vb2_buffer to which the plane in question belongs
- * @plane_no: plane number for which the cookie is to be returned
- *
- * This function returns an allocator specific cookie for a given plane if
- * available, NULL otherwise. The allocator should provide some simple static
- * inline function that converts this cookie to the allocator-specific type
- * the driver can use directly to access the buffer. This can be, for example,
- * a physical address, a pointer to a scatter list or an IOMMU mapping.
- */
-void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
-{
- struct vb2_queue *q = vb->vb2_queue;
-
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
- return NULL;
-
- return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
-}
-EXPORT_SYMBOL_GPL(vb2_plane_cookie);
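/*
 * Editor's sketch (not part of the original file): the kind of static inline
 * an allocator might provide on top of vb2_plane_cookie(), as described in
 * the comment above. Assumes a hypothetical contiguous-DMA allocator that
 * stores a dma_addr_t behind the cookie pointer.
 */
#include <linux/dma-mapping.h>

static inline dma_addr_t foo_plane_dma_addr(struct vb2_buffer *vb,
					    unsigned int plane_no)
{
	dma_addr_t *addr = vb2_plane_cookie(vb, plane_no);

	/* A NULL cookie means the plane has no mapping yet */
	return addr ? *addr : 0;
}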
-
-/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
- * @vb: vb2_buffer returned from the driver
- * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
- * or VB2_BUF_STATE_ERROR if the operation finished with an error
- *
- * This function should be called by the driver after a hardware operation on
- * a buffer is finished and the buffer may be returned to userspace. The driver
- * cannot use this buffer any more until videobuf hands it back to the
- * driver by means of the buf_queue callback. Only buffers previously queued to the
- * driver by buf_queue can be passed to this function.
- */
-void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned long flags;
- unsigned int plane;
-
- if (vb->state != VB2_BUF_STATE_ACTIVE)
- return;
-
- if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
- return;
-
- dprintk(4, "Done processing on buffer %d, state: %d\n",
- vb->v4l2_buf.index, state);
-
- /* sync buffers */
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, finish, vb->planes[plane].mem_priv);
-
- /* Add the buffer to the done buffers list */
- spin_lock_irqsave(&q->done_lock, flags);
- vb->state = state;
- list_add_tail(&vb->done_entry, &q->done_list);
- atomic_dec(&q->queued_count);
- spin_unlock_irqrestore(&q->done_lock, flags);
-
- /* Inform any processes that may be waiting for buffers */
- wake_up(&q->done_wq);
-}
-EXPORT_SYMBOL_GPL(vb2_buffer_done);
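/*
 * Editor's sketch (not part of the original file): completing the active
 * buffer from an interrupt handler with vb2_buffer_done(). Reuses the
 * hypothetical foo_dev from the querybuf sketch; the "active" bookkeeping is
 * assumed to be maintained by the driver's buf_queue callback, and locking
 * around it is omitted for brevity.
 */
#include <linux/interrupt.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_dev *dev = data;

	if (dev->active) {
		/* Hand the finished buffer back to vb2 so DQBUF can return it */
		vb2_buffer_done(dev->active, VB2_BUF_STATE_DONE);
		dev->active = NULL;
	}
	return IRQ_HANDLED;
}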
-
-/**
- * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
- * v4l2_buffer by the userspace. The caller has already verified that struct
- * v4l2_buffer has a valid number of planes.
- */
-static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
- struct v4l2_plane *v4l2_planes)
-{
- unsigned int plane;
-
- if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
- /* Fill in driver-provided information for OUTPUT types */
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Will have to go up to b->length when API starts
- * accepting variable number of planes.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].bytesused =
- b->m.planes[plane].bytesused;
- v4l2_planes[plane].data_offset =
- b->m.planes[plane].data_offset;
- }
- }
-
- if (b->memory == V4L2_MEMORY_USERPTR) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].m.userptr =
- b->m.planes[plane].m.userptr;
- v4l2_planes[plane].length =
- b->m.planes[plane].length;
- }
- }
- if (b->memory == V4L2_MEMORY_DMABUF) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].m.fd =
- b->m.planes[plane].m.fd;
- v4l2_planes[plane].length =
- b->m.planes[plane].length;
- v4l2_planes[plane].data_offset =
- b->m.planes[plane].data_offset;
- }
- }
- } else {
- /*
- * Single-planar buffers do not use the planes array,
- * so fill in the relevant v4l2_buffer struct fields instead.
- * In videobuf we use our internal v4l2_planes struct for
- * single-planar buffers as well, for simplicity.
- */
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- v4l2_planes[0].bytesused = b->bytesused;
- v4l2_planes[0].data_offset = 0;
- }
-
- if (b->memory == V4L2_MEMORY_USERPTR) {
- v4l2_planes[0].m.userptr = b->m.userptr;
- v4l2_planes[0].length = b->length;
- }
-
- if (b->memory == V4L2_MEMORY_DMABUF) {
- v4l2_planes[0].m.fd = b->m.fd;
- v4l2_planes[0].length = b->length;
- v4l2_planes[0].data_offset = 0;
- }
-
- }
-
- vb->v4l2_buf.field = b->field;
- vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
-}
-
-/**
- * __qbuf_userptr() - handle qbuf of a USERPTR buffer
- */
-static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
- struct vb2_queue *q = vb->vb2_queue;
- void *mem_priv;
- unsigned int plane;
- int ret;
- int write = !V4L2_TYPE_IS_OUTPUT(q->type);
-
- /* Copy relevant information provided by the userspace */
- __fill_vb2_buffer(vb, b, planes);
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- /* Skip the plane if already verified */
- if (vb->v4l2_planes[plane].m.userptr &&
- vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
- && vb->v4l2_planes[plane].length == planes[plane].length)
- continue;
-
- dprintk(3, "qbuf: userspace address for plane %d changed, "
- "reacquiring memory\n", plane);
-
- /* Check if the provided plane buffer is large enough */
- if (planes[plane].length < q->plane_sizes[plane]) {
- ret = -EINVAL;
- goto err;
- }
-
- /* Release previously acquired memory if present */
- if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
-
- vb->planes[plane].mem_priv = NULL;
- vb->v4l2_planes[plane].m.userptr = 0;
- vb->v4l2_planes[plane].length = 0;
-
- /* Acquire each plane's memory */
- mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
- planes[plane].m.userptr,
- planes[plane].length, write);
- if (IS_ERR_OR_NULL(mem_priv)) {
- dprintk(1, "qbuf: failed acquiring userspace "
- "memory for plane %d\n", plane);
- ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
- goto err;
- }
- vb->planes[plane].mem_priv = mem_priv;
- }
-
- /*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
- * Now that everything is in order, copy relevant information
- * provided by userspace.
- */
- for (plane = 0; plane < vb->num_planes; ++plane)
- vb->v4l2_planes[plane] = planes[plane];
-
- return 0;
-err:
- /* In case of errors, release planes that were already acquired */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
- vb->planes[plane].mem_priv = NULL;
- vb->v4l2_planes[plane].m.userptr = 0;
- vb->v4l2_planes[plane].length = 0;
- }
-
- return ret;
-}
-
-/**
- * __qbuf_mmap() - handle qbuf of an MMAP buffer
- */
-static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- __fill_vb2_buffer(vb, b, vb->v4l2_planes);
- return 0;
-}
-
-/**
- * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
- */
-static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
- struct vb2_queue *q = vb->vb2_queue;
- void *mem_priv;
- unsigned int plane;
- int ret;
- int write = !V4L2_TYPE_IS_OUTPUT(q->type);
-
- /* Verify and copy relevant information provided by the userspace */
- __fill_vb2_buffer(vb, b, planes);
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
-
- if (IS_ERR_OR_NULL(dbuf)) {
- dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
- plane);
- ret = -EINVAL;
- goto err;
- }
-
- /* use DMABUF size if length is not provided */
- if (planes[plane].length == 0)
- planes[plane].length = dbuf->size;
-
- if (planes[plane].length < planes[plane].data_offset +
- q->plane_sizes[plane]) {
- ret = -EINVAL;
- goto err;
- }
-
- /* Skip the plane if already verified */
- if (dbuf == vb->planes[plane].dbuf &&
- vb->v4l2_planes[plane].length == planes[plane].length) {
- dma_buf_put(dbuf);
- continue;
- }
-
- dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
-
- /* Release previously acquired memory if present */
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
- memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
-
- /* Acquire each plane's memory */
- mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
- dbuf, planes[plane].length, write);
- if (IS_ERR(mem_priv)) {
- dprintk(1, "qbuf: failed to attach dmabuf\n");
- ret = PTR_ERR(mem_priv);
- dma_buf_put(dbuf);
- goto err;
- }
-
- vb->planes[plane].dbuf = dbuf;
- vb->planes[plane].mem_priv = mem_priv;
- }
-
- /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
- * really we want to do this just before the DMA, not while queueing
- * the buffer(s).
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
- if (ret) {
- dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
- plane);
- goto err;
- }
- vb->planes[plane].dbuf_mapped = 1;
- }
-
- /*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
- * Now that everything is in order, copy relevant information
- * provided by userspace.
- */
- for (plane = 0; plane < vb->num_planes; ++plane)
- vb->v4l2_planes[plane] = planes[plane];
-
- return 0;
-err:
- /* In case of errors, release planes that were already acquired */
- __vb2_buf_dmabuf_put(vb);
-
- return ret;
-}
-
-/**
- * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
- */
-static void __enqueue_in_driver(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned int plane;
-
- vb->state = VB2_BUF_STATE_ACTIVE;
- atomic_inc(&q->queued_count);
-
- /* sync buffers */
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, prepare, vb->planes[plane].mem_priv);
-
- q->ops->buf_queue(vb);
-}
-
-static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- struct vb2_queue *q = vb->vb2_queue;
- int ret;
-
- switch (q->memory) {
- case V4L2_MEMORY_MMAP:
- ret = __qbuf_mmap(vb, b);
- break;
- case V4L2_MEMORY_USERPTR:
- ret = __qbuf_userptr(vb, b);
- break;
- case V4L2_MEMORY_DMABUF:
- ret = __qbuf_dmabuf(vb, b);
- break;
- default:
- WARN(1, "Invalid queue type\n");
- ret = -EINVAL;
- }
-
- if (!ret)
- ret = call_qop(q, buf_prepare, vb);
- if (ret)
- dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
- else
- vb->state = VB2_BUF_STATE_PREPARED;
-
- return ret;
-}
-
-/**
- * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- struct vb2_buffer *vb;
- int ret;
-
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", __func__);
- return -EBUSY;
- }
-
- if (b->type != q->type) {
- dprintk(1, "%s(): invalid buffer type\n", __func__);
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "%s(): buffer index out of range\n", __func__);
- return -EINVAL;
- }
-
- vb = q->bufs[b->index];
- if (NULL == vb) {
- /* Should never happen */
- dprintk(1, "%s(): buffer is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (b->memory != q->memory) {
- dprintk(1, "%s(): invalid memory type\n", __func__);
- return -EINVAL;
- }
-
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
- return -EINVAL;
- }
- ret = __verify_planes_array(vb, b);
- if (ret < 0)
- return ret;
- ret = __buf_prepare(vb, b);
- if (ret < 0)
- return ret;
-
- __fill_v4l2_buffer(vb, b);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_prepare_buf);
-
-/**
- * vb2_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- struct rw_semaphore *mmap_sem = NULL;
- struct vb2_buffer *vb;
- int ret = 0;
-
- /*
- * In case of user pointer buffers the vb2 allocator needs direct
- * access to userspace pages. This requires taking read access on the
- * mmap semaphore in the current process structure. The same
- * semaphore is taken before calling the mmap operation, while both mmap
- * and qbuf are called by the driver or the v4l2 core with the driver's
- * lock held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in
- * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
- * releases the driver's lock, takes mmap_sem and then takes the
- * driver's lock again.
- *
- * To avoid races with other vb2 calls, which might be made after
- * releasing the driver's lock, this operation is performed at the
- * beginning of qbuf processing. This way the queue status is
- * consistent after getting the driver's lock back.
- */
- if (q->memory == V4L2_MEMORY_USERPTR) {
- mmap_sem = &current->mm->mmap_sem;
- call_qop(q, wait_prepare, q);
- down_read(mmap_sem);
- call_qop(q, wait_finish, q);
- }
-
- if (q->fileio) {
- dprintk(1, "qbuf: file io in progress\n");
- ret = -EBUSY;
- goto unlock;
- }
-
- if (b->type != q->type) {
- dprintk(1, "qbuf: invalid buffer type\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "qbuf: buffer index out of range\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- vb = q->bufs[b->index];
- if (NULL == vb) {
- /* Should never happen */
- dprintk(1, "qbuf: buffer is NULL\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- if (b->memory != q->memory) {
- dprintk(1, "qbuf: invalid memory type\n");
- ret = -EINVAL;
- goto unlock;
- }
- ret = __verify_planes_array(vb, b);
- if (ret)
- goto unlock;
-
- switch (vb->state) {
- case VB2_BUF_STATE_DEQUEUED:
- ret = __buf_prepare(vb, b);
- if (ret)
- goto unlock;
- case VB2_BUF_STATE_PREPARED:
- break;
- default:
- dprintk(1, "qbuf: buffer already in use\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- /*
- * Add to the queued buffers list, a buffer will stay on it until
- * dequeued in dqbuf.
- */
- list_add_tail(&vb->queued_entry, &q->queued_list);
- vb->state = VB2_BUF_STATE_QUEUED;
-
- /*
- * If already streaming, give the buffer to driver for processing.
- * If not, the buffer will be given to driver on next streamon.
- */
- if (q->streaming)
- __enqueue_in_driver(vb);
-
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
-
- dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
-unlock:
- if (mmap_sem)
- up_read(mmap_sem);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vb2_qbuf);
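/*
 * Editor's sketch (not part of the original file): a vidioc_qbuf handler,
 * again a thin wrapper around the helper above, reusing the hypothetical
 * foo_dev structure from the earlier sketches.
 */
static int foo_vidioc_qbuf(struct file *file, void *priv,
			   struct v4l2_buffer *b)
{
	struct foo_dev *dev = video_drvdata(file);

	return vb2_qbuf(&dev->queue, b);
}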
-
-/**
- * __vb2_wait_for_done_vb() - wait for a buffer to become available
- * for dequeuing
- *
- * Will sleep if required for nonblocking == false.
- */
-static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-{
- /*
- * All operations on vb_done_list are performed under done_lock
- * spinlock protection. However, buffers may be removed from
- * it and returned to userspace only while holding both driver's
- * lock and the done_lock spinlock. Thus we can be sure that, as
- * long as we hold the driver's lock, the list stays non-empty
- * once we have observed it to be non-empty.
- */
-
- for (;;) {
- int ret;
-
- if (!q->streaming) {
- dprintk(1, "Streaming off, will not wait for buffers\n");
- return -EINVAL;
- }
-
- if (!list_empty(&q->done_list)) {
- /*
- * Found a buffer that we were waiting for.
- */
- break;
- }
-
- if (nonblocking) {
- dprintk(1, "Nonblocking and no buffers to dequeue, "
- "will not wait\n");
- return -EAGAIN;
- }
-
- /*
- * We are streaming and blocking, wait for another buffer to
- * become ready or for streamoff. Driver's lock is released to
- * allow streamoff or qbuf to be called while waiting.
- */
- call_qop(q, wait_prepare, q);
-
- /*
- * All locks have been released, it is safe to sleep now.
- */
- dprintk(3, "Will sleep waiting for buffers\n");
- ret = wait_event_interruptible(q->done_wq,
- !list_empty(&q->done_list) || !q->streaming);
-
- /*
- * We need to reevaluate both conditions again after reacquiring
- * the locks or return an error if one occurred.
- */
- call_qop(q, wait_finish, q);
- if (ret) {
- dprintk(1, "Sleep was interrupted\n");
- return ret;
- }
- }
- return 0;
-}
-
-/**
- * __vb2_get_done_vb() - get a buffer ready for dequeuing
- *
- * Will sleep if required for nonblocking == false.
- */
-static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- struct v4l2_buffer *b, int nonblocking)
-{
- unsigned long flags;
- int ret;
-
- /*
- * Wait for at least one buffer to become available on the done_list.
- */
- ret = __vb2_wait_for_done_vb(q, nonblocking);
- if (ret)
- return ret;
-
- /*
- * Driver's lock has been held since we last verified that done_list
- * is not empty, so no need for another list_empty(done_list) check.
- */
- spin_lock_irqsave(&q->done_lock, flags);
- *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
- /*
- * Only remove the buffer from done_list if v4l2_buffer can handle all
- * the planes.
- */
- ret = __verify_planes_array(*vb, b);
- if (!ret)
- list_del(&(*vb)->done_entry);
- spin_unlock_irqrestore(&q->done_lock, flags);
-
- return ret;
-}
-
-/**
- * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
- * @q: videobuf2 queue
- *
- * This function will wait until all buffers that have been given to the driver
- * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
- * the wait_prepare/wait_finish pair. It is intended to be called with all locks
- * taken, for example from stop_streaming() callback.
- */
-int vb2_wait_for_all_buffers(struct vb2_queue *q)
-{
- if (!q->streaming) {
- dprintk(1, "Streaming off, will not wait for buffers\n");
- return -EINVAL;
- }
-
- wait_event(q->done_wq, !atomic_read(&q->queued_count));
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
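/*
 * Editor's sketch (not part of the original file): a stop_streaming callback
 * that halts the hypothetical foo hardware and then waits until every buffer
 * owned by the driver has been given back via vb2_buffer_done(), as the
 * comment above recommends. foo_hw_stop() is assumed to exist and to return
 * all in-flight buffers with vb2_buffer_done(..., VB2_BUF_STATE_ERROR).
 */
static void foo_hw_stop(struct foo_dev *dev);	/* hypothetical */

static int foo_stop_streaming(struct vb2_queue *q)
{
	struct foo_dev *dev = vb2_get_drv_priv(q);

	foo_hw_stop(dev);

	/* Called with all locks held, so the plain wait helper is fine here */
	return vb2_wait_for_all_buffers(q);
}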
-
-/**
- * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
- */
-static void __vb2_dqbuf(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- unsigned int i;
-
- /* nothing to do if the buffer is already dequeued */
- if (vb->state == VB2_BUF_STATE_DEQUEUED)
- return;
-
- vb->state = VB2_BUF_STATE_DEQUEUED;
-
- /* unmap DMABUF buffer */
- if (q->memory == V4L2_MEMORY_DMABUF)
- for (i = 0; i < vb->num_planes; ++i) {
- if (!vb->planes[i].dbuf_mapped)
- continue;
- call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
- vb->planes[i].dbuf_mapped = 0;
- }
-}
-
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
-{
- struct vb2_buffer *vb = NULL;
- int ret;
-
- if (q->fileio) {
- dprintk(1, "dqbuf: file io in progress\n");
- return -EBUSY;
- }
-
- if (b->type != q->type) {
- dprintk(1, "dqbuf: invalid buffer type\n");
- return -EINVAL;
- }
- ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
- if (ret < 0)
- return ret;
-
- ret = call_qop(q, buf_finish, vb);
- if (ret) {
- dprintk(1, "dqbuf: buffer finish failed\n");
- return ret;
- }
-
- switch (vb->state) {
- case VB2_BUF_STATE_DONE:
- dprintk(3, "dqbuf: Returning done buffer\n");
- break;
- case VB2_BUF_STATE_ERROR:
- dprintk(3, "dqbuf: Returning done buffer with errors\n");
- break;
- default:
- dprintk(1, "dqbuf: Invalid buffer state\n");
- return -EINVAL;
- }
-
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
- /* Remove from videobuf queue */
- list_del(&vb->queued_entry);
- /* go back to dequeued state */
- __vb2_dqbuf(vb);
-
- dprintk(1, "dqbuf of buffer %d, with state %d\n",
- vb->v4l2_buf.index, vb->state);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_dqbuf);
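/*
 * Editor's sketch (not part of the original file): a vidioc_dqbuf handler
 * passing the file's O_NONBLOCK flag through, as the @nonblocking comment
 * above describes. Reuses the hypothetical foo_dev structure.
 */
#include <linux/fs.h>

static int foo_vidioc_dqbuf(struct file *file, void *priv,
			    struct v4l2_buffer *b)
{
	struct foo_dev *dev = video_drvdata(file);

	/* Don't sleep waiting for a buffer if the device was opened O_NONBLOCK */
	return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}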
-
-/**
- * __vb2_queue_cancel() - cancel and stop (pause) streaming
- *
- * Removes all queued buffers from driver's queue and all buffers queued by
- * userspace from videobuf's queue. Returns to state after reqbufs.
- */
-static void __vb2_queue_cancel(struct vb2_queue *q)
-{
- unsigned int i;
-
- /*
- * Tell driver to stop all transactions and release all queued
- * buffers.
- */
- if (q->streaming)
- call_qop(q, stop_streaming, q);
- q->streaming = 0;
-
- /*
- * Remove all buffers from videobuf's list...
- */
- INIT_LIST_HEAD(&q->queued_list);
- /*
- * ...and done list; userspace will not receive any buffers it
- * has not already dequeued before initiating cancel.
- */
- INIT_LIST_HEAD(&q->done_list);
- atomic_set(&q->queued_count, 0);
- wake_up_all(&q->done_wq);
-
- /*
- * Reinitialize all buffers for next use.
- */
- for (i = 0; i < q->num_buffers; ++i)
- __vb2_dqbuf(q->bufs[i]);
-}
-
-/**
- * vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
-int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
-{
- struct vb2_buffer *vb;
- int ret;
-
- if (q->fileio) {
- dprintk(1, "streamon: file io in progress\n");
- return -EBUSY;
- }
-
- if (type != q->type) {
- dprintk(1, "streamon: invalid stream type\n");
- return -EINVAL;
- }
-
- if (q->streaming) {
- dprintk(1, "streamon: already streaming\n");
- return -EBUSY;
- }
-
- /*
- * If any buffers were queued before streamon,
- * we can now pass them to driver for processing.
- */
- list_for_each_entry(vb, &q->queued_list, queued_entry)
- __enqueue_in_driver(vb);
-
- /*
- * Let driver notice that streaming state has been enabled.
- */
- ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
- if (ret) {
- dprintk(1, "streamon: driver refused to start streaming\n");
- __vb2_queue_cancel(q);
- return ret;
- }
-
- q->streaming = 1;
-
- dprintk(3, "Streamon successful\n");
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_streamon);
-
-
-/**
- * vb2_streamoff - stop streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamoff handler
- *
- * Should be called from vidioc_streamoff handler of a driver.
- * This function:
- * 1) verifies current state,
- * 2) stops streaming and dequeues any queued buffers, including those previously
- * passed to the driver (after waiting for the driver to finish).
- *
- * This call can be used for pausing playback.
- * The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
- */
-int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
-{
- if (q->fileio) {
- dprintk(1, "streamoff: file io in progress\n");
- return -EBUSY;
- }
-
- if (type != q->type) {
- dprintk(1, "streamoff: invalid stream type\n");
- return -EINVAL;
- }
-
- if (!q->streaming) {
- dprintk(1, "streamoff: not streaming\n");
- return -EINVAL;
- }
-
- /*
- * Cancel will pause streaming and remove all buffers from the driver
- * and videobuf, effectively returning control over them to userspace.
- */
- __vb2_queue_cancel(q);
-
- dprintk(3, "Streamoff successful\n");
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_streamoff);
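/*
 * Editor's sketch (not part of the original file): streamon/streamoff
 * handlers and how the hypothetical foo handlers from the previous sketches
 * might be wired into a v4l2_ioctl_ops table.
 */
#include <media/v4l2-ioctl.h>

static int foo_vidioc_streamon(struct file *file, void *priv,
			       enum v4l2_buf_type type)
{
	struct foo_dev *dev = video_drvdata(file);

	return vb2_streamon(&dev->queue, type);
}

static int foo_vidioc_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct foo_dev *dev = video_drvdata(file);

	return vb2_streamoff(&dev->queue, type);
}

static const struct v4l2_ioctl_ops foo_ioctl_ops = {
	.vidioc_reqbufs   = foo_vidioc_reqbufs,
	.vidioc_querybuf  = foo_vidioc_querybuf,
	.vidioc_qbuf      = foo_vidioc_qbuf,
	.vidioc_dqbuf     = foo_vidioc_dqbuf,
	.vidioc_streamon  = foo_vidioc_streamon,
	.vidioc_streamoff = foo_vidioc_streamoff,
};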
-
-/**
- * __find_plane_by_offset() - find plane associated with the given offset off
- */
-static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
- unsigned int *_buffer, unsigned int *_plane)
-{
- struct vb2_buffer *vb;
- unsigned int buffer, plane;
-
- /*
- * Go over all buffers and their planes, comparing the given offset
- * with an offset assigned to each plane. If a match is found,
- * return its buffer and plane numbers.
- */
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- vb = q->bufs[buffer];
-
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->v4l2_planes[plane].m.mem_offset == off) {
- *_buffer = buffer;
- *_plane = plane;
- return 0;
- }
- }
- }
-
- return -EINVAL;
-}
-
-/**
- * vb2_expbuf() - Export a buffer as a file descriptor
- * @q: videobuf2 queue
- * @eb: export buffer structure passed from userspace to vidioc_expbuf
- * handler in driver
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
- */
-int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
-{
- struct vb2_buffer *vb = NULL;
- struct vb2_plane *vb_plane;
- int ret;
- struct dma_buf *dbuf;
-
- if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
- return -EINVAL;
- }
-
- if (!q->mem_ops->get_dmabuf) {
- dprintk(1, "Queue does not support DMA buffer exporting\n");
- return -EINVAL;
- }
-
- if (eb->flags & ~O_CLOEXEC) {
- dprintk(1, "Queue supports only the O_CLOEXEC flag\n");
- return -EINVAL;
- }
-
- if (eb->type != q->type) {
- dprintk(1, "expbuf: invalid buffer type\n");
- return -EINVAL;
- }
-
- if (eb->index >= q->num_buffers) {
- dprintk(1, "buffer index out of range\n");
- return -EINVAL;
- }
-
- vb = q->bufs[eb->index];
-
- if (eb->plane >= vb->num_planes) {
- dprintk(1, "buffer plane out of range\n");
- return -EINVAL;
- }
-
- vb_plane = &vb->planes[eb->plane];
-
- dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
- if (IS_ERR_OR_NULL(dbuf)) {
- dprintk(1, "Failed to export buffer %d, plane %d\n",
- eb->index, eb->plane);
- return -EINVAL;
- }
-
- ret = dma_buf_fd(dbuf, eb->flags);
- if (ret < 0) {
- dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
- eb->index, eb->plane, ret);
- dma_buf_put(dbuf);
- return ret;
- }
-
- dprintk(3, "buffer %d, plane %d exported as file descriptor %d\n",
- eb->index, eb->plane, ret);
- eb->fd = ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_expbuf);
-
-/**
- * vb2_mmap() - map video buffers into application address space
- * @q: videobuf2 queue
- * @vma: vma passed to the mmap file operation handler in the driver
- *
- * Should be called from mmap file operation handler of a driver.
- * This function maps one plane of one of the available video buffers to
- * userspace. To map whole video memory allocated on reqbufs, this function
- * has to be called once per each plane per each buffer previously allocated.
- *
- * When the userspace application calls mmap, it passes to it an offset returned
- * to it earlier by the means of vidioc_querybuf handler. That offset acts as
- * a "cookie", which is then used to identify the plane to be mapped.
- * This function finds a plane with a matching offset and a mapping is performed
- * by the means of a provided memory operation.
- *
- * The return values from this function are intended to be directly returned
- * from the mmap handler in driver.
- */
-int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
-{
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
- struct vb2_buffer *vb;
- unsigned int buffer, plane;
- int ret;
- unsigned long length;
-
- if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
- return -EINVAL;
- }
-
- /*
- * Check memory area access mode.
- */
- if (!(vma->vm_flags & VM_SHARED)) {
- dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
- return -EINVAL;
- }
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- if (!(vma->vm_flags & VM_WRITE)) {
- dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
- return -EINVAL;
- }
- } else {
- if (!(vma->vm_flags & VM_READ)) {
- dprintk(1, "Invalid vma flags, VM_READ needed\n");
- return -EINVAL;
- }
- }
-
- /*
- * Find the plane corresponding to the offset passed by userspace.
- */
- ret = __find_plane_by_offset(q, off, &buffer, &plane);
- if (ret)
- return ret;
-
- vb = q->bufs[buffer];
-
- /*
- * MMAP requires page_aligned buffers.
- * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
- * so, we need to do the same here.
- */
- length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
- if (length < (vma->vm_end - vma->vm_start)) {
- dprintk(1,
- "MMAP invalid, as it would overflow buffer length\n");
- return -EINVAL;
- }
-
- ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
- if (ret)
- return ret;
-
- dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap);
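/*
 * Editor's sketch (not part of the original file): the driver's mmap file
 * operation simply delegates to vb2_mmap(); the offset encoded in
 * vma->vm_pgoff selects the plane, as explained above. foo_dev is the
 * hypothetical structure from the earlier sketches.
 */
#include <linux/mm.h>

static int foo_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *dev = video_drvdata(file);

	/* One call maps exactly one plane of one buffer */
	return vb2_mmap(&dev->queue, vma);
}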
-
-#ifndef CONFIG_MMU
-unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
- unsigned long off = pgoff << PAGE_SHIFT;
- struct vb2_buffer *vb;
- unsigned int buffer, plane;
- int ret;
-
- if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
- return -EINVAL;
- }
-
- /*
- * Find the plane corresponding to the offset passed by userspace.
- */
- ret = __find_plane_by_offset(q, off, &buffer, &plane);
- if (ret)
- return ret;
-
- vb = q->bufs[buffer];
-
- return (unsigned long)vb2_plane_vaddr(vb, plane);
-}
-EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
-#endif
-
-static int __vb2_init_fileio(struct vb2_queue *q, int read);
-static int __vb2_cleanup_fileio(struct vb2_queue *q);
-
-/**
- * vb2_poll() - implements poll userspace operation
- * @q: videobuf2 queue
- * @file: file argument passed to the poll file operation handler
- * @wait: wait argument passed to the poll file operation handler
- *
- * This function implements the poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
- * pending events.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
-{
- struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- struct vb2_buffer *vb = NULL;
- unsigned int res = 0;
- unsigned long flags;
-
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- if (v4l2_event_pending(fh))
- res = POLLPRI;
- else if (req_events & POLLPRI)
- poll_wait(file, &fh->wait, wait);
- }
-
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
- return res;
- if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
- return res;
-
- /*
- * Start file I/O emulator only if streaming API has not been used yet.
- */
- if (q->num_buffers == 0 && q->fileio == NULL) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
- (req_events & (POLLIN | POLLRDNORM))) {
- if (__vb2_init_fileio(q, 1))
- return res | POLLERR;
- }
- if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
- (req_events & (POLLOUT | POLLWRNORM))) {
- if (__vb2_init_fileio(q, 0))
- return res | POLLERR;
- /*
- * Write to OUTPUT queue can be done immediately.
- */
- return res | POLLOUT | POLLWRNORM;
- }
- }
-
- /*
- * There is nothing to wait for if no buffers have been queued yet.
- */
- if (list_empty(&q->queued_list))
- return res | POLLERR;
-
- if (list_empty(&q->done_list))
- poll_wait(file, &q->done_wq, wait);
-
- /*
- * Take first buffer available for dequeuing.
- */
- spin_lock_irqsave(&q->done_lock, flags);
- if (!list_empty(&q->done_list))
- vb = list_first_entry(&q->done_list, struct vb2_buffer,
- done_entry);
- spin_unlock_irqrestore(&q->done_lock, flags);
-
- if (vb && (vb->state == VB2_BUF_STATE_DONE
- || vb->state == VB2_BUF_STATE_ERROR)) {
- return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
- res | POLLOUT | POLLWRNORM :
- res | POLLIN | POLLRDNORM;
- }
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_poll);
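
In the same spirit, a driver that needs no extra serialization can forward its poll file operation directly to vb2_poll(). A minimal sketch with hypothetical mydrv_* names (the vb2_fop_poll() helper later in this file adds the locking that most drivers actually want):

static unsigned int mydrv_poll(struct file *file, poll_table *wait)
{
        struct mydrv_dev *dev = video_drvdata(file);    /* hypothetical per-device struct */

        return vb2_poll(&dev->queue, file, wait);
}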
-
-/**
- * vb2_queue_init() - initialize a videobuf2 queue
- * @q: videobuf2 queue; this structure should be allocated in driver
- *
- * The vb2_queue structure should be allocated by the driver. The driver is
- * responsible for clearing its content and setting initial values for some
- * required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
- */
-int vb2_queue_init(struct vb2_queue *q)
-{
- /*
- * Sanity check
- */
- if (WARN_ON(!q) ||
- WARN_ON(!q->ops) ||
- WARN_ON(!q->mem_ops) ||
- WARN_ON(!q->type) ||
- WARN_ON(!q->io_modes) ||
- WARN_ON(!q->ops->queue_setup) ||
- WARN_ON(!q->ops->buf_queue) ||
- WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK))
- return -EINVAL;
-
- /* Warn that the driver should choose an appropriate timestamp type */
- WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
-
- INIT_LIST_HEAD(&q->queued_list);
- INIT_LIST_HEAD(&q->done_list);
- spin_lock_init(&q->done_lock);
- init_waitqueue_head(&q->done_wq);
-
- if (q->buf_struct_size == 0)
- q->buf_struct_size = sizeof(struct vb2_buffer);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_queue_init);
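
For illustration, a sketch of the minimum a driver fills in before calling vb2_queue_init(); the mydrv_* names are placeholders and the allocator and ops choices are only examples, not requirements:

static int mydrv_init_vb2_queue(struct mydrv_dev *dev)
{
        struct vb2_queue *q = &dev->queue;

        memset(q, 0, sizeof(*q));
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_READ;
        q->drv_priv = dev;
        q->buf_struct_size = sizeof(struct mydrv_buffer); /* driver buffer embedding vb2_buffer */
        q->ops = &mydrv_vb2_ops;        /* must provide queue_setup() and buf_queue() */
        q->mem_ops = &vb2_vmalloc_memops;
        q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

        return vb2_queue_init(q);
}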
-
-/**
- * vb2_queue_release() - stop streaming, release the queue and free memory
- * @q: videobuf2 queue
- *
- * This function stops streaming and performs necessary clean ups, including
- * freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
- */
-void vb2_queue_release(struct vb2_queue *q)
-{
- __vb2_cleanup_fileio(q);
- __vb2_queue_cancel(q);
- __vb2_queue_free(q, q->num_buffers);
-}
-EXPORT_SYMBOL_GPL(vb2_queue_release);
-
-/**
- * struct vb2_fileio_buf - buffer context used by file io emulator
- *
- * vb2 provides a compatibility layer and emulator of file io (read and
- * write) calls on top of the streaming API. This structure is used for
- * tracking context related to the buffers.
- */
-struct vb2_fileio_buf {
- void *vaddr;
- unsigned int size;
- unsigned int pos;
- unsigned int queued:1;
-};
-
-/**
- * struct vb2_fileio_data - queue context used by file io emulator
- *
- * vb2 provides a compatibility layer and emulator of file io (read and
- * write) calls on top of the streaming API. For proper operation it requires
- * this structure to save the driver state between each call of the read
- * or write function.
- */
-struct vb2_fileio_data {
- struct v4l2_requestbuffers req;
- struct v4l2_buffer b;
- struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
- unsigned int index;
- unsigned int q_count;
- unsigned int dq_count;
- unsigned int flags;
-};
-
-/**
- * __vb2_init_fileio() - initialize file io emulator
- * @q: videobuf2 queue
- * @read: mode selector (1 means read, 0 means write)
- */
-static int __vb2_init_fileio(struct vb2_queue *q, int read)
-{
- struct vb2_fileio_data *fileio;
- int i, ret;
- unsigned int count = 0;
-
- /*
- * Sanity check
- */
- if ((read && !(q->io_modes & VB2_READ)) ||
- (!read && !(q->io_modes & VB2_WRITE)))
- BUG();
-
- /*
- * Check if device supports mapping buffers to kernel virtual space.
- */
- if (!q->mem_ops->vaddr)
- return -EBUSY;
-
- /*
- * Check if streaming api has not been already activated.
- */
- if (q->streaming || q->num_buffers > 0)
- return -EBUSY;
-
- /*
- * Start with count 1, driver can increase it in queue_setup()
- */
- count = 1;
-
- dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
- (read) ? "read" : "write", count, q->io_flags);
-
- fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
- if (fileio == NULL)
- return -ENOMEM;
-
- fileio->flags = q->io_flags;
-
- /*
- * Request buffers and use MMAP type to force driver
- * to allocate buffers by itself.
- */
- fileio->req.count = count;
- fileio->req.memory = V4L2_MEMORY_MMAP;
- fileio->req.type = q->type;
- ret = vb2_reqbufs(q, &fileio->req);
- if (ret)
- goto err_kfree;
-
- /*
- * Check if plane_count is correct
- * (multiplane buffers are not supported).
- */
- if (q->bufs[0]->num_planes != 1) {
- ret = -EBUSY;
- goto err_reqbufs;
- }
-
- /*
- * Get kernel address of each buffer.
- */
- for (i = 0; i < q->num_buffers; i++) {
- fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
- if (fileio->bufs[i].vaddr == NULL) {
- ret = -EINVAL;
- goto err_reqbufs;
- }
- fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
- }
-
- /*
- * Read mode requires pre queuing of all buffers.
- */
- if (read) {
- /*
- * Queue all buffers.
- */
- for (i = 0; i < q->num_buffers; i++) {
- struct v4l2_buffer *b = &fileio->b;
- memset(b, 0, sizeof(*b));
- b->type = q->type;
- b->memory = q->memory;
- b->index = i;
- ret = vb2_qbuf(q, b);
- if (ret)
- goto err_reqbufs;
- fileio->bufs[i].queued = 1;
- }
-
- /*
- * Start streaming.
- */
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto err_reqbufs;
- }
-
- q->fileio = fileio;
-
- return ret;
-
-err_reqbufs:
- fileio->req.count = 0;
- vb2_reqbufs(q, &fileio->req);
-
-err_kfree:
- kfree(fileio);
- return ret;
-}
-
-/**
- * __vb2_cleanup_fileio() - free resources used by the file io emulator
- * @q: videobuf2 queue
- */
-static int __vb2_cleanup_fileio(struct vb2_queue *q)
-{
- struct vb2_fileio_data *fileio = q->fileio;
-
- if (fileio) {
- /*
- * Hack fileio context to enable direct calls to vb2 ioctl
- * interface.
- */
- q->fileio = NULL;
-
- vb2_streamoff(q, q->type);
- fileio->req.count = 0;
- vb2_reqbufs(q, &fileio->req);
- kfree(fileio);
- dprintk(3, "file io emulator closed\n");
- }
- return 0;
-}
-
-/**
- * __vb2_perform_fileio() - perform a single file io (read or write) operation
- * @q: videobuf2 queue
- * @data: pointer to the target userspace buffer
- * @count: number of bytes to read or write
- * @ppos: file handle position tracking pointer
- * @nonblock: mode selector (1 means nonblocking, 0 means blocking)
- * @read: access mode selector (1 means read, 0 means write)
- */
-static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblock, int read)
-{
- struct vb2_fileio_data *fileio;
- struct vb2_fileio_buf *buf;
- int ret, index;
-
- dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
- read ? "read" : "write", (long)*ppos, count,
- nonblock ? "non" : "");
-
- if (!data)
- return -EINVAL;
-
- /*
- * Initialize emulator on first call.
- */
- if (!q->fileio) {
- ret = __vb2_init_fileio(q, read);
- dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
- if (ret)
- return ret;
- }
- fileio = q->fileio;
-
- /*
- * Hack fileio context to enable direct calls to vb2 ioctl interface.
- * The pointer will be restored before returning from this function.
- */
- q->fileio = NULL;
-
- index = fileio->index;
- buf = &fileio->bufs[index];
-
- /*
- * Check if we need to dequeue the buffer.
- */
- if (buf->queued) {
- struct vb2_buffer *vb;
-
- /*
- * Call vb2_dqbuf to get buffer back.
- */
- memset(&fileio->b, 0, sizeof(fileio->b));
- fileio->b.type = q->type;
- fileio->b.memory = q->memory;
- fileio->b.index = index;
- ret = vb2_dqbuf(q, &fileio->b, nonblock);
- dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
- if (ret)
- goto end;
- fileio->dq_count += 1;
-
- /*
- * Get number of bytes filled by the driver
- */
- vb = q->bufs[index];
- buf->size = vb2_get_plane_payload(vb, 0);
- buf->queued = 0;
- }
-
- /*
- * Limit count on last few bytes of the buffer.
- */
- if (buf->pos + count > buf->size) {
- count = buf->size - buf->pos;
- dprintk(5, "reducing read count: %zd\n", count);
- }
-
- /*
- * Transfer data to userspace.
- */
- dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
- count, index, buf->pos);
- if (read)
- ret = copy_to_user(data, buf->vaddr + buf->pos, count);
- else
- ret = copy_from_user(buf->vaddr + buf->pos, data, count);
- if (ret) {
- dprintk(3, "file io: error copying data\n");
- ret = -EFAULT;
- goto end;
- }
-
- /*
- * Update counters.
- */
- buf->pos += count;
- *ppos += count;
-
- /*
- * Queue next buffer if required.
- */
- if (buf->pos == buf->size ||
- (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
- /*
- * Check if this is the last buffer to read.
- */
- if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
- fileio->dq_count == 1) {
- dprintk(3, "file io: read limit reached\n");
- /*
- * Restore fileio pointer and release the context.
- */
- q->fileio = fileio;
- return __vb2_cleanup_fileio(q);
- }
-
- /*
- * Call vb2_qbuf and give buffer to the driver.
- */
- memset(&fileio->b, 0, sizeof(fileio->b));
- fileio->b.type = q->type;
- fileio->b.memory = q->memory;
- fileio->b.index = index;
- fileio->b.bytesused = buf->pos;
- ret = vb2_qbuf(q, &fileio->b);
- dprintk(5, "file io: vb2_qbuf result: %d\n", ret);
- if (ret)
- goto end;
-
- /*
- * Buffer has been queued, update the status
- */
- buf->pos = 0;
- buf->queued = 1;
- buf->size = q->bufs[0]->v4l2_planes[0].length;
- fileio->q_count += 1;
-
- /*
- * Switch to the next buffer
- */
- fileio->index = (index + 1) % q->num_buffers;
-
- /*
- * Start streaming if required.
- */
- if (!read && !q->streaming) {
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto end;
- }
- }
-
- /*
- * Return proper number of bytes processed.
- */
- if (ret == 0)
- ret = count;
-end:
- /*
- * Restore the fileio context and block vb2 ioctl interface.
- */
- q->fileio = fileio;
- return ret;
-}
-
-size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblocking)
-{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
-}
-EXPORT_SYMBOL_GPL(vb2_read);
-
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
- loff_t *ppos, int nonblocking)
-{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
-}
-EXPORT_SYMBOL_GPL(vb2_write);
-
-
-/*
- * The following functions are not part of the vb2 core API, but are helper
- * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
- * and struct vb2_ops.
- * They contain boilerplate code that most if not all drivers have to do
- * and so they simplify the driver code.
- */
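
To make the plumbing concrete, here is a hedged sketch of how a driver typically wires these helpers into its ops tables; the mydrv_* names are hypothetical and only the vb2-related fields are shown:

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
        /* format/query ioctls omitted */
        .vidioc_reqbufs         = vb2_ioctl_reqbufs,
        .vidioc_create_bufs     = vb2_ioctl_create_bufs,
        .vidioc_prepare_buf     = vb2_ioctl_prepare_buf,
        .vidioc_querybuf        = vb2_ioctl_querybuf,
        .vidioc_qbuf            = vb2_ioctl_qbuf,
        .vidioc_dqbuf           = vb2_ioctl_dqbuf,
        .vidioc_expbuf          = vb2_ioctl_expbuf,
        .vidioc_streamon        = vb2_ioctl_streamon,
        .vidioc_streamoff       = vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations mydrv_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .read           = vb2_fop_read,
        .poll           = vb2_fop_poll,
        .mmap           = vb2_fop_mmap,
        .unlocked_ioctl = video_ioctl2,
};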
-
-/* The queue is busy if there is an owner and you are not that owner. */
-static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
-{
- return vdev->queue->owner && vdev->queue->owner != file->private_data;
-}
-
-/* vb2 ioctl helpers */
-
-int vb2_ioctl_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct video_device *vdev = video_devdata(file);
- int res = __verify_memory_type(vdev->queue, p->memory, p->type);
-
- if (res)
- return res;
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- res = __reqbufs(vdev->queue, p);
- /* If count == 0, then the owner has released all buffers and is
- no longer the owner of the queue. Otherwise we have a new owner. */
- if (res == 0)
- vdev->queue->owner = p->count ? file->private_data : NULL;
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
-
-int vb2_ioctl_create_bufs(struct file *file, void *priv,
- struct v4l2_create_buffers *p)
-{
- struct video_device *vdev = video_devdata(file);
- int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
-
- p->index = vdev->queue->num_buffers;
- /* If count == 0, then just check if memory and type are valid.
- Any -EBUSY result from __verify_memory_type can be mapped to 0. */
- if (p->count == 0)
- return res != -EBUSY ? res : 0;
- if (res)
- return res;
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- res = __create_bufs(vdev->queue, p);
- if (res == 0)
- vdev->queue->owner = file->private_data;
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
-
-int vb2_ioctl_prepare_buf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_prepare_buf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
-
-int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
- return vb2_querybuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
-
-int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_qbuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
-
-int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
-
-int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_streamon(vdev->queue, i);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
-
-int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_streamoff(vdev->queue, i);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
-
-int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (vb2_queue_is_busy(vdev, file))
- return -EBUSY;
- return vb2_expbuf(vdev->queue, p);
-}
-EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
-
-/* v4l2_file_operations helpers */
-
-int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct video_device *vdev = video_devdata(file);
-
- return vb2_mmap(vdev->queue, vma);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_mmap);
-
-int vb2_fop_release(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (file->private_data == vdev->queue->owner) {
- vb2_queue_release(vdev->queue);
- vdev->queue->owner = NULL;
- }
- return v4l2_fh_release(file);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_release);
-
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int err = -EBUSY;
-
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- if (vb2_queue_is_busy(vdev, file))
- goto exit;
- err = vb2_write(vdev->queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
- if (vdev->queue->fileio)
- vdev->queue->owner = file->private_data;
-exit:
- if (lock)
- mutex_unlock(lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_write);
-
-ssize_t vb2_fop_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int err = -EBUSY;
-
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- if (vb2_queue_is_busy(vdev, file))
- goto exit;
- err = vb2_read(vdev->queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
- if (vdev->queue->fileio)
- vdev->queue->owner = file->private_data;
-exit:
- if (lock)
- mutex_unlock(lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_read);
-
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
-{
- struct video_device *vdev = video_devdata(file);
- struct vb2_queue *q = vdev->queue;
- struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned long req_events = poll_requested_events(wait);
- unsigned res;
- void *fileio;
- bool must_lock = false;
-
- /* Try to be smart: only lock if polling might start fileio,
- otherwise locking will only introduce unwanted delays. */
- if (q->num_buffers == 0 && q->fileio == NULL) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
- (req_events & (POLLIN | POLLRDNORM)))
- must_lock = true;
- else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
- (req_events & (POLLOUT | POLLWRNORM)))
- must_lock = true;
- }
-
- /* If locking is needed, but this helper doesn't know how, then you
- shouldn't be using this helper but you should write your own. */
- WARN_ON(must_lock && !lock);
-
- if (must_lock && lock && mutex_lock_interruptible(lock))
- return POLLERR;
-
- fileio = q->fileio;
-
- res = vb2_poll(vdev->queue, file, wait);
-
- /* If fileio was started, then we have a new queue owner. */
- if (must_lock && !fileio && q->fileio)
- q->owner = file->private_data;
- if (must_lock && lock)
- mutex_unlock(lock);
- return res;
-}
-EXPORT_SYMBOL_GPL(vb2_fop_poll);
-
-#ifndef CONFIG_MMU
-unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct video_device *vdev = video_devdata(file);
-
- return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
-#endif
-
-/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
-
-void vb2_ops_wait_prepare(struct vb2_queue *vq)
-{
- mutex_unlock(vq->lock);
-}
-EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
-
-void vb2_ops_wait_finish(struct vb2_queue *vq)
-{
- mutex_lock(vq->lock);
-}
-EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
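
A sketch of the intended use: a driver that has set vq->lock to its serialization mutex can plug these two helpers straight into its vb2_ops. The mydrv_* callbacks are hypothetical placeholders:

static const struct vb2_ops mydrv_vb2_ops = {
        .queue_setup    = mydrv_queue_setup,    /* hypothetical driver callbacks */
        .buf_queue      = mydrv_buf_queue,
        .wait_prepare   = vb2_ops_wait_prepare, /* drops vq->lock while blocking in dqbuf */
        .wait_finish    = vb2_ops_wait_finish,  /* re-takes it afterwards */
};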
-
-MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
-MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
deleted file mode 100644
index fd56f2563201..000000000000
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Pawel Osciak <pawel@osciak.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
-
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-#include <media/videobuf2-memops.h>
-
-struct vb2_dc_conf {
- struct device *dev;
-};
-
-struct vb2_dc_buf {
- struct device *dev;
- void *vaddr;
- unsigned long size;
- dma_addr_t dma_addr;
- enum dma_data_direction dma_dir;
- struct sg_table *dma_sgt;
-
- /* MMAP related */
- struct vb2_vmarea_handler handler;
- atomic_t refcount;
- struct sg_table *sgt_base;
-
- /* USERPTR related */
- struct vm_area_struct *vma;
-
- /* DMABUF related */
- struct dma_buf_attachment *db_attach;
-};
-
-/*********************************************/
-/* scatterlist table functions */
-/*********************************************/
-
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
- void (*cb)(struct page *pg))
-{
- struct scatterlist *s;
- unsigned int i;
-
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
- struct page *page = sg_page(s);
- unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
- >> PAGE_SHIFT;
- unsigned int j;
-
- for (j = 0; j < n_pages; ++j, ++page)
- cb(page);
- }
-}
-
-static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
-{
- struct scatterlist *s;
- dma_addr_t expected = sg_dma_address(sgt->sgl);
- unsigned int i;
- unsigned long size = 0;
-
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- if (sg_dma_address(s) != expected)
- break;
- expected = sg_dma_address(s) + sg_dma_len(s);
- size += sg_dma_len(s);
- }
- return size;
-}
-
-/*********************************************/
-/* callbacks for all buffers */
-/*********************************************/
-
-static void *vb2_dc_cookie(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
-
- return &buf->dma_addr;
-}
-
-static void *vb2_dc_vaddr(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
-
- return buf->vaddr;
-}
-
-static unsigned int vb2_dc_num_users(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
-
- return atomic_read(&buf->refcount);
-}
-
-static void vb2_dc_prepare(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
-
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
- return;
-
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
-}
-
-static void vb2_dc_finish(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
-
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
- return;
-
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
-}
-
-/*********************************************/
-/* callbacks for MMAP buffers */
-/*********************************************/
-
-static void vb2_dc_put(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
-
- if (!atomic_dec_and_test(&buf->refcount))
- return;
-
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
- }
- dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
- put_device(buf->dev);
- kfree(buf);
-}
-
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
-{
- struct vb2_dc_conf *conf = alloc_ctx;
- struct device *dev = conf->dev;
- struct vb2_dc_buf *buf;
-
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags);
- if (!buf->vaddr) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Prevent the device from being released while the buffer is used */
- buf->dev = get_device(dev);
- buf->size = size;
-
- buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dc_put;
- buf->handler.arg = buf;
-
- atomic_inc(&buf->refcount);
-
- return buf;
-}
-
-static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
-{
- struct vb2_dc_buf *buf = buf_priv;
- int ret;
-
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
- }
-
- /*
- * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
- * map the whole buffer
- */
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
- buf->dma_addr, buf->size);
-
- if (ret) {
- pr_err("Remapping memory failed, error: %d\n", ret);
- return ret;
- }
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = &buf->handler;
- vma->vm_ops = &vb2_common_vm_ops;
-
- vma->vm_ops->open(vma);
-
- pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, (unsigned long)buf->dma_addr, vma->vm_start,
- buf->size);
-
- return 0;
-}
-
-/*********************************************/
-/* DMABUF ops for exporters */
-/*********************************************/
-
-struct vb2_dc_attachment {
- struct sg_table sgt;
- enum dma_data_direction dir;
-};
-
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
- struct dma_buf_attachment *dbuf_attach)
-{
- struct vb2_dc_attachment *attach;
- unsigned int i;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt;
- struct vb2_dc_buf *buf = dbuf->priv;
- int ret;
-
- attach = kzalloc(sizeof(*attach), GFP_KERNEL);
- if (!attach)
- return -ENOMEM;
-
- sgt = &attach->sgt;
- /* Copy the buf->sgt_base scatter list to the attachment, as we can't
- * map the same scatter list to multiple attachments at the same time.
- */
- ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
- if (ret) {
- kfree(attach);
- return -ENOMEM;
- }
-
- rd = buf->sgt_base->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
-
- attach->dir = DMA_NONE;
- dbuf_attach->priv = attach;
-
- return 0;
-}
-
-static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
- struct dma_buf_attachment *db_attach)
-{
- struct vb2_dc_attachment *attach = db_attach->priv;
- struct sg_table *sgt;
-
- if (!attach)
- return;
-
- sgt = &attach->sgt;
-
- /* release the scatterlist cache */
- if (attach->dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dir);
- sg_free_table(sgt);
- kfree(attach);
- db_attach->priv = NULL;
-}
-
-static struct sg_table *vb2_dc_dmabuf_ops_map(
- struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
-{
- struct vb2_dc_attachment *attach = db_attach->priv;
- /* stealing dmabuf mutex to serialize map/unmap operations */
- struct mutex *lock = &db_attach->dmabuf->lock;
- struct sg_table *sgt;
- int ret;
-
- mutex_lock(lock);
-
- sgt = &attach->sgt;
- /* return previously mapped sg table */
- if (attach->dir == dir) {
- mutex_unlock(lock);
- return sgt;
- }
-
- /* release any previous cache */
- if (attach->dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dir);
- attach->dir = DMA_NONE;
- }
-
- /* mapping to the client with new direction */
- ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (ret <= 0) {
- pr_err("failed to map scatterlist\n");
- mutex_unlock(lock);
- return ERR_PTR(-EIO);
- }
-
- attach->dir = dir;
-
- mutex_unlock(lock);
-
- return sgt;
-}
-
-static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
- struct sg_table *sgt, enum dma_data_direction dir)
-{
- /* nothing to be done here */
-}
-
-static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
-{
- /* drop reference obtained in vb2_dc_get_dmabuf */
- vb2_dc_put(dbuf->priv);
-}
-
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
-{
- struct vb2_dc_buf *buf = dbuf->priv;
-
- return buf->vaddr + pgnum * PAGE_SIZE;
-}
-
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
-{
- struct vb2_dc_buf *buf = dbuf->priv;
-
- return buf->vaddr;
-}
-
-static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
- struct vm_area_struct *vma)
-{
- return vb2_dc_mmap(dbuf->priv, vma);
-}
-
-static struct dma_buf_ops vb2_dc_dmabuf_ops = {
- .attach = vb2_dc_dmabuf_ops_attach,
- .detach = vb2_dc_dmabuf_ops_detach,
- .map_dma_buf = vb2_dc_dmabuf_ops_map,
- .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
- .kmap = vb2_dc_dmabuf_ops_kmap,
- .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
- .vmap = vb2_dc_dmabuf_ops_vmap,
- .mmap = vb2_dc_dmabuf_ops_mmap,
- .release = vb2_dc_dmabuf_ops_release,
-};
-
-static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
-{
- int ret;
- struct sg_table *sgt;
-
- sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt) {
- dev_err(buf->dev, "failed to alloc sg table\n");
- return NULL;
- }
-
- ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
- buf->size);
- if (ret < 0) {
- dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
- kfree(sgt);
- return NULL;
- }
-
- return sgt;
-}
-
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
- struct dma_buf *dbuf;
-
- if (!buf->sgt_base)
- buf->sgt_base = vb2_dc_get_base_sgt(buf);
-
- if (WARN_ON(!buf->sgt_base))
- return NULL;
-
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
- if (IS_ERR(dbuf))
- return NULL;
-
- /* dmabuf keeps reference to vb2 buffer */
- atomic_inc(&buf->refcount);
-
- return dbuf;
-}
-
-/*********************************************/
-/* callbacks for USERPTR buffers */
-/*********************************************/
-
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
- int n_pages, struct vm_area_struct *vma, int write)
-{
- if (vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
- unsigned long pfn;
- int ret = follow_pfn(vma, start, &pfn);
-
- if (ret) {
- pr_err("no page for address %lu\n", start);
- return ret;
- }
- pages[i] = pfn_to_page(pfn);
- }
- } else {
- int n;
-
- n = get_user_pages(current, current->mm, start & PAGE_MASK,
- n_pages, write, 1, pages, NULL);
- /* negative error means that no page was pinned */
- n = max(n, 0);
- if (n != n_pages) {
- pr_err("got only %d of %d user pages\n", n, n_pages);
- while (n)
- put_page(pages[--n]);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
- set_page_dirty_lock(page);
- put_page(page);
-}
-
-static void vb2_dc_put_userptr(void *buf_priv)
-{
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
-
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
- sg_free_table(sgt);
- kfree(sgt);
- vb2_put_vma(buf->vma);
- kfree(buf);
-}
-
-static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
-{
- struct vb2_dc_conf *conf = alloc_ctx;
- struct vb2_dc_buf *buf;
- unsigned long start;
- unsigned long end;
- unsigned long offset;
- struct page **pages;
- int n_pages;
- int ret = 0;
- struct vm_area_struct *vma;
- struct sg_table *sgt;
- unsigned long contig_size;
- unsigned long dma_align = dma_get_cache_alignment();
-
- /* Only cache aligned DMA transfers are reliable */
- if (!IS_ALIGNED(vaddr | size, dma_align)) {
- pr_debug("user data must be aligned to %lu bytes\n", dma_align);
- return ERR_PTR(-EINVAL);
- }
-
- if (!size) {
- pr_debug("size is zero\n");
- return ERR_PTR(-EINVAL);
- }
-
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- buf->dev = conf->dev;
- buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- start = vaddr & PAGE_MASK;
- offset = vaddr & ~PAGE_MASK;
- end = PAGE_ALIGN(vaddr + size);
- n_pages = (end - start) >> PAGE_SHIFT;
-
- pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- pr_err("failed to allocate pages table\n");
- goto fail_buf;
- }
-
- /* current->mm->mmap_sem is taken by videobuf2 core */
- vma = find_vma(current->mm, vaddr);
- if (!vma) {
- pr_err("no vma for address %lu\n", vaddr);
- ret = -EFAULT;
- goto fail_pages;
- }
-
- if (vma->vm_end < vaddr + size) {
- pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
- ret = -EFAULT;
- goto fail_pages;
- }
-
- buf->vma = vb2_get_vma(vma);
- if (!buf->vma) {
- pr_err("failed to copy vma\n");
- ret = -ENOMEM;
- goto fail_pages;
- }
-
- /* extract page list from userspace mapping */
- ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
- if (ret) {
- pr_err("failed to get user pages\n");
- goto fail_vma;
- }
-
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt) {
- pr_err("failed to allocate sg table\n");
- ret = -ENOMEM;
- goto fail_get_user_pages;
- }
-
- ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
- offset, size, GFP_KERNEL);
- if (ret) {
- pr_err("failed to initialize sg table\n");
- goto fail_sgt;
- }
-
- /* pages are no longer needed */
- kfree(pages);
- pages = NULL;
-
- sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
- if (sgt->nents <= 0) {
- pr_err("failed to map scatterlist\n");
- ret = -EIO;
- goto fail_sgt_init;
- }
-
- contig_size = vb2_dc_get_contiguous_size(sgt);
- if (contig_size < size) {
- pr_err("contiguous mapping is too small %lu/%lu\n",
- contig_size, size);
- ret = -EFAULT;
- goto fail_map_sg;
- }
-
- buf->dma_addr = sg_dma_address(sgt->sgl);
- buf->size = size;
- buf->dma_sgt = sgt;
-
- return buf;
-
-fail_map_sg:
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-
-fail_sgt_init:
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, put_page);
- sg_free_table(sgt);
-
-fail_sgt:
- kfree(sgt);
-
-fail_get_user_pages:
- if (pages && !vma_is_io(buf->vma))
- while (n_pages)
- put_page(pages[--n_pages]);
-
-fail_vma:
- vb2_put_vma(buf->vma);
-
-fail_pages:
- kfree(pages); /* kfree is NULL-proof */
-
-fail_buf:
- kfree(buf);
-
- return ERR_PTR(ret);
-}
-
-/*********************************************/
-/* callbacks for DMABUF buffers */
-/*********************************************/
-
-static int vb2_dc_map_dmabuf(void *mem_priv)
-{
- struct vb2_dc_buf *buf = mem_priv;
- struct sg_table *sgt;
- unsigned long contig_size;
-
- if (WARN_ON(!buf->db_attach)) {
- pr_err("trying to pin a non attached buffer\n");
- return -EINVAL;
- }
-
- if (WARN_ON(buf->dma_sgt)) {
- pr_err("dmabuf buffer is already pinned\n");
- return 0;
- }
-
- /* get the associated scatterlist for this buffer */
- sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
- if (IS_ERR_OR_NULL(sgt)) {
- pr_err("Error getting dmabuf scatterlist\n");
- return -EINVAL;
- }
-
- /* checking if dmabuf is big enough to store contiguous chunk */
- contig_size = vb2_dc_get_contiguous_size(sgt);
- if (contig_size < buf->size) {
- pr_err("contiguous chunk is too small %lu/%lu b\n",
- contig_size, buf->size);
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
- return -EFAULT;
- }
-
- buf->dma_addr = sg_dma_address(sgt->sgl);
- buf->dma_sgt = sgt;
-
- return 0;
-}
-
-static void vb2_dc_unmap_dmabuf(void *mem_priv)
-{
- struct vb2_dc_buf *buf = mem_priv;
- struct sg_table *sgt = buf->dma_sgt;
-
- if (WARN_ON(!buf->db_attach)) {
- pr_err("trying to unpin a non-attached buffer\n");
- return;
- }
-
- if (WARN_ON(!sgt)) {
- pr_err("dmabuf buffer is already unpinned\n");
- return;
- }
-
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
-
- buf->dma_addr = 0;
- buf->dma_sgt = NULL;
-}
-
-static void vb2_dc_detach_dmabuf(void *mem_priv)
-{
- struct vb2_dc_buf *buf = mem_priv;
-
- /* if vb2 works correctly you should never detach a mapped buffer */
- if (WARN_ON(buf->dma_addr))
- vb2_dc_unmap_dmabuf(buf);
-
- /* detach this attachment */
- dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
- kfree(buf);
-}
-
-static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write)
-{
- struct vb2_dc_conf *conf = alloc_ctx;
- struct vb2_dc_buf *buf;
- struct dma_buf_attachment *dba;
-
- if (dbuf->size < size)
- return ERR_PTR(-EFAULT);
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- buf->dev = conf->dev;
- /* create attachment for the dmabuf with the user device */
- dba = dma_buf_attach(dbuf, buf->dev);
- if (IS_ERR(dba)) {
- pr_err("failed to attach dmabuf\n");
- kfree(buf);
- return dba;
- }
-
- buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- buf->size = size;
- buf->db_attach = dba;
-
- return buf;
-}
-
-/*********************************************/
-/* DMA CONTIG exported functions */
-/*********************************************/
-
-const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dc_alloc,
- .put = vb2_dc_put,
- .get_dmabuf = vb2_dc_get_dmabuf,
- .cookie = vb2_dc_cookie,
- .vaddr = vb2_dc_vaddr,
- .mmap = vb2_dc_mmap,
- .get_userptr = vb2_dc_get_userptr,
- .put_userptr = vb2_dc_put_userptr,
- .prepare = vb2_dc_prepare,
- .finish = vb2_dc_finish,
- .map_dmabuf = vb2_dc_map_dmabuf,
- .unmap_dmabuf = vb2_dc_unmap_dmabuf,
- .attach_dmabuf = vb2_dc_attach_dmabuf,
- .detach_dmabuf = vb2_dc_detach_dmabuf,
- .num_users = vb2_dc_num_users,
-};
-EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
-
-void *vb2_dma_contig_init_ctx(struct device *dev)
-{
- struct vb2_dc_conf *conf;
-
- conf = kzalloc(sizeof *conf, GFP_KERNEL);
- if (!conf)
- return ERR_PTR(-ENOMEM);
-
- conf->dev = dev;
-
- return conf;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
-
-void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
-{
- kfree(alloc_ctx);
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
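
Putting the two context helpers together, a driver would typically do something along these lines; this is a hedged sketch, and the mydrv_* type and field names are assumptions. The context pointer is later handed back to vb2 through the alloc_ctxs[] array in the driver's queue_setup() callback:

struct mydrv_dev {
        struct vb2_queue queue;
        void *alloc_ctx;
};

static int mydrv_setup_dma(struct mydrv_dev *dev, struct device *hwdev)
{
        dev->alloc_ctx = vb2_dma_contig_init_ctx(hwdev);
        if (IS_ERR(dev->alloc_ctx))
                return PTR_ERR(dev->alloc_ctx);

        dev->queue.mem_ops = &vb2_dma_contig_memops;
        return 0;
}

static void mydrv_teardown_dma(struct mydrv_dev *dev)
{
        vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
}

The bus address of a queued plane is then available to the hardware via the vb2_dma_contig_plane_dma_addr() helper from the matching header.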
-
-MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
-MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
deleted file mode 100644
index 16ae3dcc7e29..000000000000
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-memops.h>
-#include <media/videobuf2-dma-sg.h>
-
-static int debug;
-module_param(debug, int, 0644);
-
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
- } while (0)
-
-struct vb2_dma_sg_buf {
- void *vaddr;
- struct page **pages;
- int write;
- int offset;
- struct vb2_dma_sg_desc sg_desc;
- atomic_t refcount;
- struct vb2_vmarea_handler handler;
-};
-
-static void vb2_dma_sg_put(void *buf_priv);
-
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
-{
- struct vb2_dma_sg_buf *buf;
- int i;
-
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return NULL;
-
- buf->vaddr = NULL;
- buf->write = 0;
- buf->offset = 0;
- buf->sg_desc.size = size;
- /* size is already page aligned */
- buf->sg_desc.num_pages = size >> PAGE_SHIFT;
-
- buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto fail_sglist_alloc;
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto fail_pages_array_alloc;
-
- for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
- __GFP_NOWARN | gfp_flags);
- if (NULL == buf->pages[i])
- goto fail_pages_alloc;
- sg_set_page(&buf->sg_desc.sglist[i],
- buf->pages[i], PAGE_SIZE, 0);
- }
-
- buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dma_sg_put;
- buf->handler.arg = buf;
-
- atomic_inc(&buf->refcount);
-
- dprintk(1, "%s: Allocated buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
- return buf;
-
-fail_pages_alloc:
- while (--i >= 0)
- __free_page(buf->pages[i]);
- kfree(buf->pages);
-
-fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
- kfree(buf);
- return NULL;
-}
-
-static void vb2_dma_sg_put(void *buf_priv)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
-
- if (atomic_dec_and_test(&buf->refcount)) {
- dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
- buf->sg_desc.num_pages);
- if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
- vfree(buf->sg_desc.sglist);
- while (--i >= 0)
- __free_page(buf->pages[i]);
- kfree(buf->pages);
- kfree(buf);
- }
-}
-
-static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
-{
- struct vb2_dma_sg_buf *buf;
- unsigned long first, last;
- int num_pages_from_user, i;
-
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return NULL;
-
- buf->vaddr = NULL;
- buf->write = write;
- buf->offset = vaddr & ~PAGE_MASK;
- buf->sg_desc.size = size;
-
- first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
- last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->sg_desc.num_pages = last - first + 1;
-
- buf->sg_desc.sglist = vzalloc(
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto userptr_fail_sglist_alloc;
-
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto userptr_fail_pages_array_alloc;
-
- num_pages_from_user = get_user_pages(current, current->mm,
- vaddr & PAGE_MASK,
- buf->sg_desc.num_pages,
- write,
- 1, /* force */
- buf->pages,
- NULL);
-
- if (num_pages_from_user != buf->sg_desc.num_pages)
- goto userptr_fail_get_user_pages;
-
- sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
- PAGE_SIZE - buf->offset, buf->offset);
- size -= PAGE_SIZE - buf->offset;
- for (i = 1; i < buf->sg_desc.num_pages; ++i) {
- sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
- min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
- return buf;
-
-userptr_fail_get_user_pages:
- dprintk(1, "get_user_pages requested/got: %d/%d\n",
- num_pages_from_user, buf->sg_desc.num_pages);
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
- kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
- kfree(buf);
- return NULL;
-}
-
-/*
- * @put_userptr: inform the allocator that a USERPTR buffer will no longer
- * be used
- */
-static void vb2_dma_sg_put_userptr(void *buf_priv)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
-
- dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
- if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
- while (--i >= 0) {
- if (buf->write)
- set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
- }
- vfree(buf->sg_desc.sglist);
- kfree(buf->pages);
- kfree(buf);
-}
-
-static void *vb2_dma_sg_vaddr(void *buf_priv)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
-
- BUG_ON(!buf);
-
- if (!buf->vaddr)
- buf->vaddr = vm_map_ram(buf->pages,
- buf->sg_desc.num_pages,
- -1,
- PAGE_KERNEL);
-
- /* add offset in case userptr is not page-aligned */
- return buf->vaddr + buf->offset;
-}
-
-static unsigned int vb2_dma_sg_num_users(void *buf_priv)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
-
- return atomic_read(&buf->refcount);
-}
-
-static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
-
- if (!buf) {
- printk(KERN_ERR "No memory to map\n");
- return -EINVAL;
- }
-
- do {
- int ret;
-
- ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
- if (ret) {
- printk(KERN_ERR "Remapping memory, error: %d\n", ret);
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
-
-
- /*
- * Use common vm_area operations to track buffer refcount.
- */
- vma->vm_private_data = &buf->handler;
- vma->vm_ops = &vb2_common_vm_ops;
-
- vma->vm_ops->open(vma);
-
- return 0;
-}
-
-static void *vb2_dma_sg_cookie(void *buf_priv)
-{
- struct vb2_dma_sg_buf *buf = buf_priv;
-
- return &buf->sg_desc;
-}
-
-const struct vb2_mem_ops vb2_dma_sg_memops = {
- .alloc = vb2_dma_sg_alloc,
- .put = vb2_dma_sg_put,
- .get_userptr = vb2_dma_sg_get_userptr,
- .put_userptr = vb2_dma_sg_put_userptr,
- .vaddr = vb2_dma_sg_vaddr,
- .mmap = vb2_dma_sg_mmap,
- .num_users = vb2_dma_sg_num_users,
- .cookie = vb2_dma_sg_cookie,
-};
-EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
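
With this allocator the memory is only pinned, not mapped for DMA; the driver fetches the descriptor through the plane cookie and performs its own dma_map_sg(). A minimal sketch under those assumptions (hwdev and mydrv_* are placeholders):

static int mydrv_map_plane0(struct device *hwdev, struct vb2_buffer *vb)
{
        struct vb2_dma_sg_desc *desc = vb2_plane_cookie(vb, 0);

        /* returns the number of mapped entries, 0 on failure */
        return dma_map_sg(hwdev, desc->sglist, desc->num_pages, DMA_FROM_DEVICE);
}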
-
-MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
-MODULE_AUTHOR("Andrzej Pietrasiewicz");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
deleted file mode 100644
index 81c1ad8b2cf1..000000000000
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * videobuf2-memops.c - generic memory handling routines for videobuf2
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Pawel Osciak <pawel@osciak.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/file.h>
-
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-memops.h>
-
-/**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma: given virtual memory area
- *
- * This function attempts to acquire an area mapped in userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operations that are done when a process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
- */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
-{
- struct vm_area_struct *vma_copy;
-
- vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
- if (vma_copy == NULL)
- return NULL;
-
- if (vma->vm_ops && vma->vm_ops->open)
- vma->vm_ops->open(vma);
-
- if (vma->vm_file)
- get_file(vma->vm_file);
-
- memcpy(vma_copy, vma, sizeof(*vma));
-
- vma_copy->vm_mm = NULL;
- vma_copy->vm_next = NULL;
- vma_copy->vm_prev = NULL;
-
- return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_vma() - release a userspace virtual memory area
- * @vma: virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
- */
-void vb2_put_vma(struct vm_area_struct *vma)
-{
- if (!vma)
- return;
-
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
-
- if (vma->vm_file)
- fput(vma->vm_file);
-
- kfree(vma);
-}
-EXPORT_SYMBOL_GPL(vb2_put_vma);
-
-/**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr: starting virtual address of the area to be verified
- * @size: size of the area
- * @res_pa: will return the physical address for the given vaddr
- * @res_vma: will return locked copy of struct vm_area for the given area
- *
- * This function will go through the memory area of size @size mapped at
- * @vaddr and verify that the underlying physical pages are contiguous. If
- * they are contiguous, the virtual memory area is locked, @res_vma is filled
- * with the copy and @res_pa is set to the physical address of the buffer.
- *
- * Returns 0 on success.
- */
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
- struct vm_area_struct **res_vma, dma_addr_t *res_pa)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long offset, start, end;
- unsigned long this_pfn, prev_pfn;
- dma_addr_t pa = 0;
-
- start = vaddr;
- offset = start & ~PAGE_MASK;
- end = start + size;
-
- vma = find_vma(mm, start);
-
- if (vma == NULL || vma->vm_end < end)
- return -EFAULT;
-
- for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
- int ret = follow_pfn(vma, start, &this_pfn);
- if (ret)
- return ret;
-
- if (prev_pfn == 0)
- pa = this_pfn << PAGE_SHIFT;
- else if (this_pfn != prev_pfn + 1)
- return -EFAULT;
-
- prev_pfn = this_pfn;
- }
-
- /*
- * Memory is contiguous, lock the vma and return to the caller
- */
- *res_vma = vb2_get_vma(vma);
- if (*res_vma == NULL)
- return -ENOMEM;
-
- *res_pa = pa + offset;
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
-
-/**
- * vb2_common_vm_open() - increase refcount of the vma
- * @vma: virtual memory region for the mapping
- *
- * This function adds another user to the provided vma. It expects
- * struct vb2_vmarea_handler pointer in vma->vm_private_data.
- */
-static void vb2_common_vm_open(struct vm_area_struct *vma)
-{
- struct vb2_vmarea_handler *h = vma->vm_private_data;
-
- pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
- __func__, h, atomic_read(h->refcount), vma->vm_start,
- vma->vm_end);
-
- atomic_inc(h->refcount);
-}
-
-/**
- * vb2_common_vm_close() - decrease refcount of the vma
- * @vma: virtual memory region for the mapping
- *
- * This function releases the user from the provided vma. It expects
- * struct vb2_vmarea_handler pointer in vma->vm_private_data.
- */
-static void vb2_common_vm_close(struct vm_area_struct *vma)
-{
- struct vb2_vmarea_handler *h = vma->vm_private_data;
-
- pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
- __func__, h, atomic_read(h->refcount), vma->vm_start,
- vma->vm_end);
-
- h->put(h->arg);
-}
-
-/**
- * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
- * video buffers
- */
-const struct vm_operations_struct vb2_common_vm_ops = {
- .open = vb2_common_vm_open,
- .close = vb2_common_vm_close,
-};
-EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
-
-MODULE_DESCRIPTION("common memory handling routines for videobuf2");
-MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
deleted file mode 100644
index 313d9771b2bc..000000000000
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Pawel Osciak <pawel@osciak.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-vmalloc.h>
-#include <media/videobuf2-memops.h>
-
-struct vb2_vmalloc_buf {
- void *vaddr;
- struct page **pages;
- struct vm_area_struct *vma;
- int write;
- unsigned long size;
- unsigned int n_pages;
- atomic_t refcount;
- struct vb2_vmarea_handler handler;
- struct dma_buf *dbuf;
-};
-
-static void vb2_vmalloc_put(void *buf_priv);
-
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
-{
- struct vb2_vmalloc_buf *buf;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
- if (!buf)
- return NULL;
-
- buf->size = size;
- buf->vaddr = vmalloc_user(buf->size);
- buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_vmalloc_put;
- buf->handler.arg = buf;
-
- if (!buf->vaddr) {
- pr_debug("vmalloc of size %ld failed\n", buf->size);
- kfree(buf);
- return NULL;
- }
-
- atomic_inc(&buf->refcount);
- return buf;
-}
-
-static void vb2_vmalloc_put(void *buf_priv)
-{
- struct vb2_vmalloc_buf *buf = buf_priv;
-
- if (atomic_dec_and_test(&buf->refcount)) {
- vfree(buf->vaddr);
- kfree(buf);
- }
-}
-
-static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
-{
- struct vb2_vmalloc_buf *buf;
- unsigned long first, last;
- int n_pages, offset;
- struct vm_area_struct *vma;
- dma_addr_t physp;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return NULL;
-
- buf->write = write;
- offset = vaddr & ~PAGE_MASK;
- buf->size = size;
-
-
- vma = find_vma(current->mm, vaddr);
- if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
- if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
- goto fail_pages_array_alloc;
- buf->vma = vma;
- buf->vaddr = ioremap_nocache(physp, size);
- if (!buf->vaddr)
- goto fail_pages_array_alloc;
- } else {
- first = vaddr >> PAGE_SHIFT;
- last = (vaddr + size - 1) >> PAGE_SHIFT;
- buf->n_pages = last - first + 1;
- buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto fail_pages_array_alloc;
-
- /* current->mm->mmap_sem is taken by videobuf2 core */
- n_pages = get_user_pages(current, current->mm,
- vaddr & PAGE_MASK, buf->n_pages,
- write, 1, /* force */
- buf->pages, NULL);
- if (n_pages != buf->n_pages)
- goto fail_get_user_pages;
-
- buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
- PAGE_KERNEL);
- if (!buf->vaddr)
- goto fail_get_user_pages;
- }
-
- buf->vaddr += offset;
- return buf;
-
-fail_get_user_pages:
- pr_debug("get_user_pages requested/got: %d/%d\n", n_pages,
- buf->n_pages);
- while (--n_pages >= 0)
- put_page(buf->pages[n_pages]);
- kfree(buf->pages);
-
-fail_pages_array_alloc:
- kfree(buf);
-
- return NULL;
-}
-
-static void vb2_vmalloc_put_userptr(void *buf_priv)
-{
- struct vb2_vmalloc_buf *buf = buf_priv;
- unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
- unsigned int i;
-
- if (buf->pages) {
- if (vaddr)
- vm_unmap_ram((void *)vaddr, buf->n_pages);
- for (i = 0; i < buf->n_pages; ++i) {
- if (buf->write)
- set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
- }
- kfree(buf->pages);
- } else {
- if (buf->vma)
- vb2_put_vma(buf->vma);
- iounmap(buf->vaddr);
- }
- kfree(buf);
-}
-
-static void *vb2_vmalloc_vaddr(void *buf_priv)
-{
- struct vb2_vmalloc_buf *buf = buf_priv;
-
- if (!buf->vaddr) {
- pr_err("Address of an unallocated plane requested "
- "or cannot map user pointer\n");
- return NULL;
- }
-
- return buf->vaddr;
-}
-
-static unsigned int vb2_vmalloc_num_users(void *buf_priv)
-{
- struct vb2_vmalloc_buf *buf = buf_priv;
- return atomic_read(&buf->refcount);
-}
-
-static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
-{
- struct vb2_vmalloc_buf *buf = buf_priv;
- int ret;
-
- if (!buf) {
- pr_err("No memory to map\n");
- return -EINVAL;
- }
-
- ret = remap_vmalloc_range(vma, buf->vaddr, 0);
- if (ret) {
- pr_err("Remapping vmalloc memory, error: %d\n", ret);
- return ret;
- }
-
- /*
- * Make sure that vm_areas for 2 buffers won't be merged together
- */
- vma->vm_flags |= VM_DONTEXPAND;
-
- /*
- * Use common vm_area operations to track buffer refcount.
- */
- vma->vm_private_data = &buf->handler;
- vma->vm_ops = &vb2_common_vm_ops;
-
- vma->vm_ops->open(vma);
-
- return 0;
-}
-
-/*********************************************/
-/* callbacks for DMABUF buffers */
-/*********************************************/
-
-static int vb2_vmalloc_map_dmabuf(void *mem_priv)
-{
- struct vb2_vmalloc_buf *buf = mem_priv;
-
- buf->vaddr = dma_buf_vmap(buf->dbuf);
-
- return buf->vaddr ? 0 : -EFAULT;
-}
-
-static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
-{
- struct vb2_vmalloc_buf *buf = mem_priv;
-
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
- buf->vaddr = NULL;
-}
-
-static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
-{
- struct vb2_vmalloc_buf *buf = mem_priv;
-
- if (buf->vaddr)
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
-
- kfree(buf);
-}
-
-static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write)
-{
- struct vb2_vmalloc_buf *buf;
-
- if (dbuf->size < size)
- return ERR_PTR(-EFAULT);
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- buf->dbuf = dbuf;
- buf->write = write;
- buf->size = size;
-
- return buf;
-}
-
-
-const struct vb2_mem_ops vb2_vmalloc_memops = {
- .alloc = vb2_vmalloc_alloc,
- .put = vb2_vmalloc_put,
- .get_userptr = vb2_vmalloc_get_userptr,
- .put_userptr = vb2_vmalloc_put_userptr,
- .map_dmabuf = vb2_vmalloc_map_dmabuf,
- .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
- .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
- .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
- .vaddr = vb2_vmalloc_vaddr,
- .mmap = vb2_vmalloc_mmap,
- .num_users = vb2_vmalloc_num_users,
-};
-EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
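
This allocator is the natural fit for devices that fill buffers with the CPU (USB bridges and similar): after selecting q->mem_ops = &vb2_vmalloc_memops, the driver simply writes through the kernel mapping. A hedged sketch with hypothetical mydrv_* names:

static void mydrv_fill_plane0(struct vb2_buffer *vb, const void *src, size_t len)
{
        void *dst = vb2_plane_vaddr(vb, 0);     /* kernel mapping provided by this allocator */

        memcpy(dst, src, len);
        vb2_set_plane_payload(vb, 0, len);
}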
-
-MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
-MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
-MODULE_LICENSE("GPL");