Diffstat (limited to 'drivers/media/pci/intel/ipu3/ipu3-cio2.c')
-rw-r--r--	drivers/media/pci/intel/ipu3/ipu3-cio2.c | 739
1 file changed, 331 insertions(+), 408 deletions(-)
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 92f5eadf2c99..986b9afd7cb5 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1,28 +1,34 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Intel Corporation
+ * Copyright (C) 2017,2020 Intel Corporation
  *
  * Based partially on Intel IPU4 driver written by
  *  Sakari Ailus <sakari.ailus@linux.intel.com>
- *  Samu Onkalo <samu.onkalo@intel.com>
+ *  Samu Onkalo
  *  Jouni Högander <jouni.hogander@intel.com>
- *  Jouni Ukkonen <jouni.ukkonen@intel.com>
+ *  Jouni Ukkonen
  *  Antti Laakso <antti.laakso@intel.com>
  * et al.
- *
  */
 
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/vmalloc.h>
+
+#include <media/ipu-bridge.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-event.h>
 #include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
 #include <media/v4l2-ioctl.h>
 #include <media/videobuf2-dma-sg.h>
@@ -32,6 +38,7 @@ struct ipu3_cio2_fmt {
 	u32 mbus_code;
 	u32 fourcc;
 	u8 mipicode;
+	u8 bpp;
 };
 
 /*
@@ -45,18 +52,27 @@ static const struct ipu3_cio2_fmt formats[] = {
 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
 		.mipicode	= 0x2b,
+		.bpp		= 10,
 	}, {
 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
 		.mipicode	= 0x2b,
+		.bpp		= 10,
 	}, {
 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
 		.mipicode	= 0x2b,
+		.bpp		= 10,
 	}, {
 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
 		.mipicode	= 0x2b,
+		.bpp		= 10,
+	}, {
+		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
+		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
+		.mipicode	= 0x2b,
+		.bpp		= 10,
 	},
 };
@@ -95,28 +111,29 @@ static inline u32 cio2_bytesperline(const unsigned int width)
 
 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
 {
+	struct device *dev = &cio2->pci_dev->dev;
+
 	if (cio2->dummy_lop) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
-				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
+		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
+				  cio2->dummy_lop_bus_addr);
 		cio2->dummy_lop = NULL;
 	}
 	if (cio2->dummy_page) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
-				  cio2->dummy_page, cio2->dummy_page_bus_addr);
+		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
+				  cio2->dummy_page_bus_addr);
 		cio2->dummy_page = NULL;
 	}
 }
 
 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
 {
+	struct device *dev = &cio2->pci_dev->dev;
 	unsigned int i;
 
-	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
-					      CIO2_PAGE_SIZE,
+	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
 					      &cio2->dummy_page_bus_addr,
 					      GFP_KERNEL);
-	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
-					     CIO2_PAGE_SIZE,
+	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
 					     &cio2->dummy_lop_bus_addr,
 					     GFP_KERNEL);
 	if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -127,8 +144,8 @@ static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
 	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
 	 * Initialize each entry to dummy_page bus base address.
 	 */
-	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
-		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
 
 	return 0;
 }
@@ -160,12 +177,11 @@ static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
 	unsigned int i;
 
 	entry[0].first_entry.first_page_offset = 0;
-	entry[1].second_entry.num_of_pages =
-		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
-	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
+	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
 
 	for (i = 0; i < CIO2_MAX_LOPS; i++)
-		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -182,26 +198,23 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
 
 	entry[0].first_entry.first_page_offset = b->offset;
 	remaining = length + entry[0].first_entry.first_page_offset;
-	entry[1].second_entry.num_of_pages =
-		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
+	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
+
 	/*
 	 * last_page_available_bytes has the offset of the last byte in the
 	 * last page which is still accessible by DMA. DMA cannot access
 	 * beyond this point. Valid range for this is from 0 to 4095.
 	 * 0 indicates 1st byte in the page is DMA accessible.
-	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
+	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
 	 * is available for DMA transfer.
 	 */
-	entry[1].second_entry.last_page_available_bytes =
-			(remaining & ~PAGE_MASK) ?
-				(remaining & ~PAGE_MASK) - 1 :
-				CIO2_PAGE_SIZE - 1;
+	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
+	entry[1].second_entry.last_page_available_bytes = remaining - 1;
+
 	/* Fill FBPT */
 	remaining = length;
 	i = 0;
 	while (remaining > 0) {
-		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
-		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
+		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
 		entry++;
 		i++;
 	}
@@ -209,7 +222,7 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
 	/*
 	 * The first not meaningful FBPT entry should point to a valid LOP
 	 */
-	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -290,37 +303,27 @@ static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
 	return r;
 };
 
-/* Calculate the the delay value for termination enable of clock lane HS Rx */
+/* Calculate the delay value for termination enable of clock lane HS Rx */
 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
-				 struct cio2_csi2_timing *timing)
+				 struct cio2_csi2_timing *timing,
+				 unsigned int bpp, unsigned int lanes)
 {
 	struct device *dev = &cio2->pci_dev->dev;
-	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
-	struct v4l2_ctrl *link_freq;
+	struct media_pad *src_pad;
 	s64 freq;
-	int r;
-
-	if (!q->sensor)
-		return -ENODEV;
-
-	link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
-	if (!link_freq) {
-		dev_err(dev, "failed to find LINK_FREQ\n");
-		return -EPIPE;
-	}
 
-	qm.index = v4l2_ctrl_g_ctrl(link_freq);
-	r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
-	if (r) {
-		dev_err(dev, "failed to get menu item\n");
-		return r;
+	src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
+	if (IS_ERR(src_pad)) {
+		dev_err(dev, "can't get source pad of %s (%pe)\n",
+			q->subdev.name, src_pad);
+		return PTR_ERR(src_pad);
 	}
 
-	if (!qm.value) {
-		dev_err(dev, "error invalid link_freq\n");
-		return -EINVAL;
+	freq = v4l2_get_link_freq(src_pad, bpp, lanes * 2);
+	if (freq < 0) {
+		dev_err(dev, "error %lld, invalid link_freq\n", freq);
+		return freq;
 	}
-	freq = qm.value;
 
 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
@@ -355,20 +358,28 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
 						   CIO2_FBPT_SUBENTRY_UNIT);
 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
+	struct v4l2_subdev_state *state;
+	const struct v4l2_mbus_framefmt *format;
 	const struct ipu3_cio2_fmt *fmt;
 	void __iomem *const base = cio2->base;
 	u8 lanes, csi2bus = q->csi2.port;
 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
-	struct cio2_csi2_timing timing;
+	struct cio2_csi2_timing timing = { 0 };
 	int i, r;
 
-	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
+	state = v4l2_subdev_lock_and_get_active_state(&q->subdev);
+	format = v4l2_subdev_state_get_format(state, CIO2_PAD_SINK);
+
+	fmt = cio2_find_format(NULL, &format->code);
+
+	v4l2_subdev_unlock_state(state);
+
 	if (!fmt)
 		return -EINVAL;
 
 	lanes = q->csi2.lanes;
 
-	r = cio2_csi2_calc_timing(cio2, q, &timing);
+	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
 	if (r)
 		return r;
@@ -475,8 +486,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
 	}
 
 	/* Enable DMA */
-	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
-	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
 
 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
@@ -512,8 +522,11 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
 
 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	void __iomem *base = cio2->base;
-	unsigned int i, maxloops = 1000;
+	struct device *dev = &cio2->pci_dev->dev;
+	void __iomem *const base = cio2->base;
+	unsigned int i;
+	u32 value;
+	int ret;
 
 	/* Disable CSI receiver and MIPI backend devices */
 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -523,15 +536,11 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 
 	/* Halt DMA */
 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
-	do {
-		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
-		    CIO2_CDMAC0_DMA_HALTED)
-			break;
-		usleep_range(1000, 2000);
-	} while (--maxloops);
-	if (!maxloops)
-		dev_err(&cio2->pci_dev->dev,
-			"DMA %i can not be halted\n", CIO2_DMA_CHAN);
+	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+				 value, value & CIO2_CDMAC0_DMA_HALTED,
+				 4000, 2000000);
+	if (ret)
+		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
 
 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
@@ -545,7 +554,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
 {
 	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_queue *q = cio2->cur_queue;
-	int buffers_found = 0;
+	struct cio2_fbpt_entry *entry;
 	u64 ns = ktime_get_ns();
 
 	if (dma_chan >= CIO2_QUEUES) {
@@ -553,43 +562,41 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
 		return;
 	}
 
+	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
+			 dma_chan);
+		return;
+	}
+
 	/* Find out which buffer(s) are ready */
 	do {
-		struct cio2_fbpt_entry *const entry =
-			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
 		struct cio2_buffer *b;
 
-		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
-			break;
-
 		b = q->bufs[q->bufs_first];
 		if (b) {
-			unsigned int bytes = entry[1].second_entry.num_of_bytes;
+			unsigned int received = entry[1].second_entry.num_of_bytes;
+			unsigned long payload =
+				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
 
 			q->bufs[q->bufs_first] = NULL;
 			atomic_dec(&q->bufs_queued);
-			dev_dbg(&cio2->pci_dev->dev,
-				"buffer %i done\n", b->vbb.vb2_buf.index);
+			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
 
 			b->vbb.vb2_buf.timestamp = ns;
 			b->vbb.field = V4L2_FIELD_NONE;
 			b->vbb.sequence = atomic_read(&q->frame_sequence);
-			if (b->vbb.vb2_buf.planes[0].length != bytes)
-				dev_warn(dev, "buffer length is %d received %d\n",
-					 b->vbb.vb2_buf.planes[0].length,
-					 bytes);
+			if (payload != received)
+				dev_warn(dev,
					 "payload length is %lu, received %u\n",
+					 payload, received);
 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
 		}
 		atomic_inc(&q->frame_sequence);
 		cio2_fbpt_entry_init_dummy(cio2, entry);
 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
-		buffers_found++;
-	} while (1);
-
-	if (buffers_found == 0)
-		dev_warn(&cio2->pci_dev->dev,
-			 "no ready buffers found on DMA channel %u\n",
-			 dma_chan);
+		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
 }
 
 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
@@ -628,6 +635,20 @@ static const char *const cio2_irq_errs[] = {
 	"non-matching Long Packet stalled",
 };
 
+static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
+{
+	unsigned long csi2_status = status;
+	unsigned int i;
+
+	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
+		dev_err(dev, "CSI-2 receiver port %i: %s\n",
+			port, cio2_irq_errs[i]);
+
+	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
+		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
+			 csi2_status, port);
+}
+
 static const char *const cio2_port_errs[] = {
 	"ECC recoverable",
 	"DPHY not recoverable",
@@ -638,10 +659,19 @@ static const char *const cio2_port_errs[] = {
 	"PKT2LONG",
 };
 
+static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
+{
+	unsigned long port_status = status;
+	unsigned int i;
+
+	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
+		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
+}
+
 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
 {
-	void __iomem *const base = cio2->base;
 	struct device *dev = &cio2->pci_dev->dev;
+	void __iomem *const base = cio2->base;
 
 	if (int_status & CIO2_INT_IOOE) {
 		/*
@@ -703,59 +733,32 @@ static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
 
 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
 		/* CSI2 receiver (error) interrupt */
-		u32 ie_status, ie_clear;
 		unsigned int port;
+		u32 ie_status;
 
-		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
-		ie_status = ie_clear;
+		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
 
 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
-			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
-			void __iomem *const csi_rx_base =
-						base + CIO2_REG_PIPE_BASE(port);
-			unsigned int i;
-
-			while (port_status & err_mask) {
-				i = ffs(port_status) - 1;
-				dev_err(dev, "port %i error %s\n",
-					port, cio2_port_errs[i]);
-				ie_status &= ~BIT(port * 8 + i);
-				port_status &= ~BIT(i);
-			}
+
+			cio2_irq_log_port_errs(dev, port, port_status);
 
 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
-				u32 csi2_status, csi2_clear;
+				void __iomem *csi_rx_base =
+						base + CIO2_REG_PIPE_BASE(port);
+				u32 csi2_status;
 
 				csi2_status = readl(csi_rx_base +
 						CIO2_REG_IRQCTRL_STATUS);
-				csi2_clear = csi2_status;
-				err_mask =
-					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
-
-				while (csi2_status & err_mask) {
-					i = ffs(csi2_status) - 1;
-					dev_err(dev,
-						"CSI-2 receiver port %i: %s\n",
-						port, cio2_irq_errs[i]);
-					csi2_status &= ~BIT(i);
-				}
-
-				writel(csi2_clear,
-				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
-				if (csi2_status)
-					dev_warn(dev,
-						 "unknown CSI2 error 0x%x on port %i\n",
-						 csi2_status, port);
-
-				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
+				cio2_irq_log_irq_errs(dev, port, csi2_status);
+
+				writel(csi2_status,
+				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
 			}
 		}
 
-		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
-		if (ie_status)
-			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
-				 ie_status);
+		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
 
 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
 	}
@@ -799,6 +802,7 @@ static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
 			atomic_dec(&q->bufs_queued);
 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf, state);
+			q->bufs[i] = NULL;
 		}
 	}
 }
@@ -810,16 +814,21 @@ static int cio2_vb2_queue_setup(struct vb2_queue *vq,
 				struct device *alloc_devs[])
 {
 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
 	unsigned int i;
 
-	*num_planes = q->format.num_planes;
+	if (*num_planes && *num_planes < q->format.num_planes)
+		return -EINVAL;
 
-	for (i = 0; i < *num_planes; ++i) {
+	for (i = 0; i < q->format.num_planes; ++i) {
+		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
+			return -EINVAL;
 		sizes[i] = q->format.plane_fmt[i].sizeimage;
-		alloc_devs[i] = &cio2->pci_dev->dev;
+		alloc_devs[i] = dev;
 	}
 
+	*num_planes = q->format.num_planes;
 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
 
 	/* Initialize buffer queue */
@@ -839,15 +848,12 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 {
 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
 	struct device *dev = &cio2->pci_dev->dev;
-	struct cio2_buffer *b =
-		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
-	static const unsigned int entries_per_page =
-		CIO2_PAGE_SIZE / sizeof(u32);
-	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
-	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
+	struct cio2_buffer *b = to_cio2_buffer(vb);
+	unsigned int pages = PFN_UP(vb->planes[0].length);
+	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
 	struct sg_table *sg;
 	struct sg_dma_page_iter sg_iter;
-	int i, j;
+	unsigned int i, j;
 
 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
@@ -858,7 +864,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 	memset(b->lop, 0, sizeof(b->lop));
 	/* Allocate LOP table */
 	for (i = 0; i < lops; i++) {
-		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
+		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
 					       &b->lop_bus_addr[i], GFP_KERNEL);
 		if (!b->lop[i])
 			goto fail;
@@ -873,23 +879,22 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
 	b->offset = sg->sgl->offset;
 
 	i = j = 0;
-	for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
+	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
 		if (!pages--)
 			break;
-		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
+		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
 		j++;
-		if (j == entries_per_page) {
+		if (j == CIO2_LOP_ENTRIES) {
 			i++;
 			j = 0;
 		}
 	}
 
-	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
 	return 0;
 fail:
-	for (i--; i >= 0; i--)
-		dma_free_coherent(dev, CIO2_PAGE_SIZE,
-				  b->lop[i], b->lop_bus_addr[i]);
+	while (i--)
+		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
 	return -ENOMEM;
 }
@@ -897,17 +902,17 @@ fail:
 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
 {
 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_queue *q =
 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
-	struct cio2_buffer *b =
-		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+	struct cio2_buffer *b = to_cio2_buffer(vb);
 	struct cio2_fbpt_entry *entry;
 	unsigned long flags;
 	unsigned int i, j, next = q->bufs_next;
 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
 	u32 fbpt_rp;
 
-	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
+	dev_dbg(dev, "queue buffer %d\n", vb->index);
 
 	/*
 	 * This code queues the buffer to the CIO2 DMA engine, which starts
@@ -958,12 +963,12 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
 			return;
 		}
 
-		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
+		dev_dbg(dev, "entry %i was full!\n", next);
 		next = (next + 1) % CIO2_MAX_BUFFERS;
 	}
 
 	local_irq_restore(flags);
-	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
+	dev_err(dev, "error: all cio2 entries were full!\n");
 	atomic_dec(&q->bufs_queued);
 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
 }
@@ -972,14 +977,14 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
 {
 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
-	struct cio2_buffer *b =
-		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+	struct device *dev = &cio2->pci_dev->dev;
+	struct cio2_buffer *b = to_cio2_buffer(vb);
 	unsigned int i;
 
 	/* Free LOP table */
 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
 		if (b->lop[i])
-			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+			dma_free_coherent(dev, PAGE_SIZE,
 					  b->lop[i], b->lop_bus_addr[i]);
 	}
 }
@@ -988,19 +993,19 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
 {
 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+	struct device *dev = &cio2->pci_dev->dev;
 	int r;
 
 	cio2->cur_queue = q;
 	atomic_set(&q->frame_sequence, 0);
 
-	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
+	r = pm_runtime_resume_and_get(dev);
 	if (r < 0) {
-		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
-		pm_runtime_put_noidle(&cio2->pci_dev->dev);
+		dev_info(dev, "failed to set power %d\n", r);
 		return r;
 	}
 
-	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
+	r = video_device_pipeline_start(&q->vdev, &q->pipe);
 	if (r)
 		goto fail_pipeline;
@@ -1020,11 +1025,11 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
 fail_csi2_subdev:
 	cio2_hw_exit(cio2, q);
 fail_hw:
-	media_pipeline_stop(&q->vdev.entity);
+	video_device_pipeline_stop(&q->vdev);
 fail_pipeline:
-	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
+	dev_dbg(dev, "failed to start streaming (%d)\n", r);
 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
-	pm_runtime_put(&cio2->pci_dev->dev);
+	pm_runtime_put(dev);
 
 	return r;
 }
@@ -1033,16 +1038,16 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
 {
 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+	struct device *dev = &cio2->pci_dev->dev;
 
 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
-		dev_err(&cio2->pci_dev->dev,
-			"failed to stop sensor streaming\n");
+		dev_err(dev, "failed to stop sensor streaming\n");
 
 	cio2_hw_exit(cio2, q);
 	synchronize_irq(cio2->pci_dev->irq);
 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
-	media_pipeline_stop(&q->vdev.entity);
-	pm_runtime_put(&cio2->pci_dev->dev);
+	video_device_pipeline_stop(&q->vdev);
+	pm_runtime_put(dev);
 	cio2->streaming = false;
 }
@@ -1053,8 +1058,6 @@ static const struct vb2_ops cio2_vb2_ops = {
 	.queue_setup = cio2_vb2_queue_setup,
 	.start_streaming = cio2_vb2_start_streaming,
 	.stop_streaming = cio2_vb2_stop_streaming,
-	.wait_prepare = vb2_ops_wait_prepare,
-	.wait_finish = vb2_ops_wait_finish,
 };
 
 /**************** V4L2 interface ****************/
@@ -1062,12 +1065,8 @@ static const struct vb2_ops cio2_vb2_ops = {
 static int cio2_v4l2_querycap(struct file *file, void *fh,
 			      struct v4l2_capability *cap)
 {
-	struct cio2_device *cio2 = video_drvdata(file);
-
 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
-	snprintf(cap->bus_info, sizeof(cap->bus_info),
-		 "PCI:%s", pci_name(cio2->pci_dev));
 
 	return 0;
 }
@@ -1105,19 +1104,16 @@ static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
 	/* Only supports up to 4224x3136 */
 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
-	if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
-		mpix->height = CIO2_IMAGE_MAX_LENGTH;
+	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
+		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
 
 	mpix->num_planes = 1;
 	mpix->pixelformat = fmt->fourcc;
 	mpix->colorspace = V4L2_COLORSPACE_RAW;
 	mpix->field = V4L2_FIELD_NONE;
-	memset(mpix->reserved, 0, sizeof(mpix->reserved));
 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
 							mpix->height;
-	memset(mpix->plane_fmt[0].reserved, 0,
-	       sizeof(mpix->plane_fmt[0].reserved));
 
 	/* use default */
 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
@@ -1206,9 +1202,9 @@ static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
 	return v4l2_event_subscribe(fh, sub, 0, NULL);
 }
 
-static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int cio2_subdev_init_state(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_state *state)
 {
-	struct v4l2_mbus_framefmt *format;
 	const struct v4l2_mbus_framefmt fmt_default = {
 		.width = 1936,
 		.height = 1096,
@@ -1219,92 +1215,59 @@ static int cio2_subdev_init_state(struct v4l2_subdev *sd,
 		.quantization = V4L2_QUANTIZATION_DEFAULT,
 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
 	};
+	struct v4l2_mbus_framefmt *format;
 
-	/* Initialize try_fmt */
-	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
+	/* Initialize the format on the sink and source pads. */
+	format = v4l2_subdev_state_get_format(state, CIO2_PAD_SINK);
 	*format = fmt_default;
 
 	/* same as sink */
-	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
+	format = v4l2_subdev_state_get_format(state, CIO2_PAD_SOURCE);
 	*format = fmt_default;
 
 	return 0;
 }
 
-/*
- * cio2_subdev_get_fmt - Handle get format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
-			       struct v4l2_subdev_pad_config *cfg,
-			       struct v4l2_subdev_format *fmt)
-{
-	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
-	struct v4l2_subdev_format format;
-	int ret;
-
-	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-		fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
-		return 0;
-	}
-
-	if (fmt->pad == CIO2_PAD_SINK) {
-		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
-				       &format);
-
-		if (ret)
-			return ret;
-		/* update colorspace etc */
-		q->subdev_fmt.colorspace = format.format.colorspace;
-		q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
-		q->subdev_fmt.quantization = format.format.quantization;
-		q->subdev_fmt.xfer_func = format.format.xfer_func;
-	}
-
-	fmt->format = q->subdev_fmt;
-
-	return 0;
-}
-
-/*
- * cio2_subdev_set_fmt - Handle set format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
-			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_state *sd_state,
 			       struct v4l2_subdev_format *fmt)
 {
-	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
+	struct v4l2_mbus_framefmt *mbus;
+	u32 mbus_code = fmt->format.code;
+	unsigned int i;
 
 	/*
 	 * Only allow setting sink pad format;
 	 * source always propagates from sink
 	 */
 	if (fmt->pad == CIO2_PAD_SOURCE)
-		return cio2_subdev_get_fmt(sd, cfg, fmt);
-
-	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-		*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
-	} else {
-		/* It's the sink, allow changing frame size */
-		q->subdev_fmt.width = fmt->format.width;
-		q->subdev_fmt.height = fmt->format.height;
-		q->subdev_fmt.code = fmt->format.code;
-		fmt->format = q->subdev_fmt;
+		return v4l2_subdev_get_fmt(sd, sd_state, fmt);
+
+	fmt->format.code = formats[0].mbus_code;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		if (formats[i].mbus_code == mbus_code) {
+			fmt->format.code = mbus_code;
+			break;
+		}
 	}
 
+	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
+	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
+	fmt->format.field = V4L2_FIELD_NONE;
+
+	mbus = v4l2_subdev_state_get_format(sd_state, CIO2_PAD_SINK);
+	*mbus = fmt->format;
+
+	/* Propagate the format to the source pad. */
+	mbus = v4l2_subdev_state_get_format(sd_state, CIO2_PAD_SOURCE);
+	*mbus = fmt->format;
+
 	return 0;
 }
 
 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
-				      struct v4l2_subdev_pad_config *cfg,
+				      struct v4l2_subdev_state *sd_state,
 				      struct v4l2_subdev_mbus_code_enum *code)
 {
 	if (code->index >= ARRAY_SIZE(formats))
@@ -1321,6 +1284,7 @@ static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
 	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(pad->entity);
 
+	memset(fmt, 0, sizeof(*fmt));
 	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
 	fmt->pad = pad->index;
 	return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
@@ -1331,16 +1295,16 @@ static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
 
 static int cio2_video_link_validate(struct media_link *link)
 {
-	struct video_device *vd = container_of(link->sink->entity,
-					       struct video_device, entity);
+	struct media_entity *entity = link->sink->entity;
+	struct video_device *vd = media_entity_to_video_device(entity);
 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
 	struct cio2_device *cio2 = video_get_drvdata(vd);
+	struct device *dev = &cio2->pci_dev->dev;
 	struct v4l2_subdev_format source_fmt;
 	int ret;
 
-	if (!media_entity_remote_pad(link->sink->entity->pads)) {
-		dev_info(&cio2->pci_dev->dev,
-			 "video node %s pad not connected\n", vd->name);
+	if (!media_pad_remote_pad_first(entity->pads)) {
+		dev_info(dev, "video node %s pad not connected\n", vd->name);
 		return -ENOTCONN;
 	}
@@ -1350,8 +1314,7 @@ static int cio2_video_link_validate(struct media_link *link)
 
 	if (source_fmt.format.width != q->format.width ||
 	    source_fmt.format.height != q->format.height) {
-		dev_err(&cio2->pci_dev->dev,
-			"Wrong width or height %ux%u (%ux%u expected)\n",
+		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
 			q->format.width, q->format.height,
 			source_fmt.format.width, source_fmt.format.height);
 		return -EINVAL;
@@ -1369,12 +1332,12 @@ static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
 };
 
 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
-	.open = cio2_subdev_open,
+	.init_state = cio2_subdev_init_state,
 };
 
 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
 	.link_validate = v4l2_subdev_link_validate_default,
-	.get_fmt = cio2_subdev_get_fmt,
+	.get_fmt = v4l2_subdev_get_fmt,
 	.set_fmt = cio2_subdev_set_fmt,
 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
 };
@@ -1387,24 +1350,30 @@ static const struct v4l2_subdev_ops cio2_subdev_ops = {
 /******* V4L2 sub-device asynchronous registration callbacks***********/
 
 struct sensor_async_subdev {
-	struct v4l2_async_subdev asd;
+	struct v4l2_async_connection asd;
 	struct csi2_bus_info csi2;
 };
 
+#define to_sensor_asd(__asd)	\
+	container_of_const(__asd, struct sensor_async_subdev, asd)
+
 /* The .bound() notifier callback when a match is found */
 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
 			       struct v4l2_subdev *sd,
-			       struct v4l2_async_subdev *asd)
+			       struct v4l2_async_connection *asd)
 {
-	struct cio2_device *cio2 = container_of(notifier,
-					struct cio2_device, notifier);
-	struct sensor_async_subdev *s_asd = container_of(asd,
-					struct sensor_async_subdev, asd);
+	struct cio2_device *cio2 = to_cio2_device(notifier);
+	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
 	struct cio2_queue *q;
+	int ret;
 
 	if (cio2->queue[s_asd->csi2.port].sensor)
 		return -EBUSY;
 
+	ret = ipu_bridge_instantiate_vcm(sd->dev);
+	if (ret)
+		return ret;
+
 	q = &cio2->queue[s_asd->csi2.port];
 	q->csi2 = s_asd->csi2;
@@ -1417,12 +1386,10 @@ static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
 /* The .unbind callback */
 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
 				 struct v4l2_subdev *sd,
-				 struct v4l2_async_subdev *asd)
+				 struct v4l2_async_connection *asd)
 {
-	struct cio2_device *cio2 = container_of(notifier,
-						struct cio2_device, notifier);
-	struct sensor_async_subdev *s_asd = container_of(asd,
-					struct sensor_async_subdev, asd);
+	struct cio2_device *cio2 = to_cio2_device(notifier);
+	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
 
 	cio2->queue[s_asd->csi2.port].sensor = NULL;
 }
@@ -1430,40 +1397,20 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
 /* .complete() is called after all subdevices have been located */
 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
 {
-	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
-						notifier);
+	struct cio2_device *cio2 = to_cio2_device(notifier);
 	struct sensor_async_subdev *s_asd;
-	struct v4l2_async_subdev *asd;
+	struct v4l2_async_connection *asd;
 	struct cio2_queue *q;
-	unsigned int pad;
 	int ret;
 
-	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
-		s_asd = container_of(asd, struct sensor_async_subdev, asd);
+	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
+		s_asd = to_sensor_asd(asd);
 		q = &cio2->queue[s_asd->csi2.port];
 
-		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
-			if (q->sensor->entity.pads[pad].flags &
-						MEDIA_PAD_FL_SOURCE)
-				break;
-
-		if (pad == q->sensor->entity.num_pads) {
-			dev_err(&cio2->pci_dev->dev,
-				"failed to find src pad for %s\n",
-				q->sensor->name);
-			return -ENXIO;
-		}
-
-		ret = media_create_pad_link(
-				&q->sensor->entity, pad,
-				&q->subdev.entity, CIO2_PAD_SINK,
-				0);
-		if (ret) {
-			dev_err(&cio2->pci_dev->dev,
-				"failed to create link for %s\n",
-				q->sensor->name);
+		ret = v4l2_create_fwnode_links_to_pad(asd->sd,
+						      &q->subdev_pads[CIO2_PAD_SINK], 0);
+		if (ret)
 			return ret;
-		}
 	}
 
 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
@@ -1477,6 +1424,7 @@ static const struct v4l2_async_notifier_operations cio2_async_ops = {
 
 static int cio2_parse_firmware(struct cio2_device *cio2)
 {
+	struct device *dev = &cio2->pci_dev->dev;
 	unsigned int i;
 	int ret;
@@ -1484,13 +1432,11 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
 		struct v4l2_fwnode_endpoint vep = {
 			.bus_type = V4L2_MBUS_CSI2_DPHY
 		};
-		struct sensor_async_subdev *s_asd = NULL;
+		struct sensor_async_subdev *s_asd;
 		struct fwnode_handle *ep;
 
-		ep = fwnode_graph_get_endpoint_by_id(
-			dev_fwnode(&cio2->pci_dev->dev), i, 0,
-			FWNODE_GRAPH_ENDPOINT_NEXT);
-
+		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+						     FWNODE_GRAPH_ENDPOINT_NEXT);
 		if (!ep)
 			continue;
@@ -1498,27 +1444,23 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
 		if (ret)
 			goto err_parse;
 
-		s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
-		if (!s_asd) {
-			ret = -ENOMEM;
+		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
+							struct
+							sensor_async_subdev);
+		if (IS_ERR(s_asd)) {
+			ret = PTR_ERR(s_asd);
 			goto err_parse;
 		}
 
 		s_asd->csi2.port = vep.base.port;
 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
 
-		ret = v4l2_async_notifier_add_fwnode_remote_subdev(
-			&cio2->notifier, ep, &s_asd->asd);
-		if (ret)
-			goto err_parse;
-
 		fwnode_handle_put(ep);
 
 		continue;
 
 err_parse:
 		fwnode_handle_put(ep);
-		kfree(s_asd);
 		return ret;
 	}
@@ -1527,10 +1469,9 @@ err_parse:
 	 * suspend.
 	 */
 	cio2->notifier.ops = &cio2_async_ops;
-	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+	ret = v4l2_async_nf_register(&cio2->notifier);
 	if (ret)
-		dev_err(&cio2->pci_dev->dev,
-			"failed to register async notifier : %d\n", ret);
+		dev_err(dev, "failed to register async notifier : %d\n", ret);
 
 	return ret;
 }
@@ -1548,27 +1489,18 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 {
 	static const u32 default_width = 1936;
 	static const u32 default_height = 1096;
-	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
-
+	struct device *dev = &cio2->pci_dev->dev;
 	struct video_device *vdev = &q->vdev;
 	struct vb2_queue *vbq = &q->vbq;
 	struct v4l2_subdev *subdev = &q->subdev;
-	struct v4l2_mbus_framefmt *fmt;
 	int r;
 
 	/* Initialize miscellaneous variables */
 	mutex_init(&q->lock);
 
-	/* Initialize formats to default values */
-	fmt = &q->subdev_fmt;
-	fmt->width = default_width;
-	fmt->height = default_height;
-	fmt->code = dflt_fmt.mbus_code;
-	fmt->field = V4L2_FIELD_NONE;
-
 	q->format.width = default_width;
 	q->format.height = default_height;
-	q->format.pixelformat = dflt_fmt.fourcc;
+	q->format.pixelformat = formats[0].fourcc;
 	q->format.colorspace = V4L2_COLORSPACE_RAW;
 	q->format.field = V4L2_FIELD_NONE;
 	q->format.num_planes = 1;
@@ -1590,8 +1522,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	subdev->internal_ops = &cio2_subdev_internal_ops;
 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
 	if (r) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed initialize subdev media entity (%d)\n", r);
+		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
 		goto fail_subdev_media_entity;
 	}
@@ -1599,8 +1530,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	vdev->entity.ops = &cio2_video_entity_ops;
 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
 	if (r) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed initialize videodev media entity (%d)\n", r);
+		dev_err(dev, "failed initialize videodev media entity (%d)\n",
+			r);
 		goto fail_vdev_media_entity;
 	}
@@ -1608,14 +1539,21 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
 	subdev->owner = THIS_MODULE;
+	subdev->dev = dev;
 	snprintf(subdev->name, sizeof(subdev->name),
 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
 	v4l2_set_subdevdata(subdev, cio2);
+
+	r = v4l2_subdev_init_finalize(subdev);
+	if (r) {
+		dev_err(dev, "failed to initialize subdev (%d)\n", r);
+		goto fail_subdev;
+	}
+
 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
 	if (r) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed initialize subdev (%d)\n", r);
+		dev_err(dev, "failed to register subdev (%d)\n", r);
 		goto fail_subdev;
 	}
@@ -1626,14 +1564,13 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	vbq->mem_ops = &vb2_dma_sg_memops;
 	vbq->buf_struct_size = sizeof(struct cio2_buffer);
 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-	vbq->min_buffers_needed = 1;
+	vbq->min_queued_buffers = 1;
 	vbq->drv_priv = cio2;
 	vbq->lock = &q->lock;
 	r = vb2_queue_init(vbq);
 	if (r) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed to initialize videobuf2 queue (%d)\n", r);
-		goto fail_vbq;
+		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
+		goto fail_subdev;
 	}
 
 	/* Initialize vdev */
@@ -1649,8 +1586,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	video_set_drvdata(vdev, cio2);
 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
 	if (r) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed to register video device (%d)\n", r);
+		dev_err(dev, "failed to register video device (%d)\n", r);
 		goto fail_vdev;
 	}
@@ -1664,17 +1600,15 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
 	return 0;
 
 fail_link:
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 fail_vdev:
-	vb2_queue_release(vbq);
-fail_vbq:
 	v4l2_device_unregister_subdev(subdev);
 fail_subdev:
 	media_entity_cleanup(&vdev->entity);
 fail_vdev_media_entity:
 	media_entity_cleanup(&subdev->entity);
 fail_subdev_media_entity:
-	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+	cio2_fbpt_exit(q, dev);
 fail_fbpt:
 	mutex_destroy(&q->lock);
@@ -1683,9 +1617,8 @@ fail_fbpt:
 
 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 	media_entity_cleanup(&q->vdev.entity);
-	vb2_queue_release(&q->vbq);
 	v4l2_device_unregister_subdev(&q->subdev);
 	media_entity_cleanup(&q->subdev.entity);
 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
 }
@@ -1721,72 +1654,58 @@ static void cio2_queues_exit(struct cio2_device *cio2)
 
 /**************** PCI interface ****************/
 
-static int cio2_pci_config_setup(struct pci_dev *dev)
-{
-	u16 pci_command;
-	int r = pci_enable_msi(dev);
-
-	if (r) {
-		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
-		return r;
-	}
-
-	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
-	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-		PCI_COMMAND_INTX_DISABLE;
-	pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
-	return 0;
-}
-
 static int cio2_pci_probe(struct pci_dev *pci_dev,
 			  const struct pci_device_id *id)
 {
+	struct device *dev = &pci_dev->dev;
 	struct cio2_device *cio2;
-	void __iomem *const *iomap;
 	int r;
 
-	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
+	/*
+	 * On some platforms no connections to sensors are defined in firmware,
+	 * if the device has no endpoints then we can try to build those as
+	 * software_nodes parsed from SSDB.
+	 */
+	r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+	if (r)
+		return r;
+
+	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
 	if (!cio2)
 		return -ENOMEM;
 	cio2->pci_dev = pci_dev;
 
 	r = pcim_enable_device(pci_dev);
 	if (r) {
-		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+		dev_err(dev, "failed to enable device (%d)\n", r);
 		return r;
 	}
 
-	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
 		 pci_dev->device, pci_dev->revision);
 
-	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
+	cio2->base = pcim_iomap_region(pci_dev, CIO2_PCI_BAR, CIO2_NAME);
+	r = PTR_ERR_OR_ZERO(cio2->base);
 	if (r) {
-		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
-		return -ENODEV;
-	}
-
-	iomap = pcim_iomap_table(pci_dev);
-	if (!iomap) {
-		dev_err(&pci_dev->dev, "failed to iomap table\n");
+		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
 		return -ENODEV;
 	}
 
-	cio2->base = iomap[CIO2_PCI_BAR];
-
 	pci_set_drvdata(pci_dev, cio2);
 
 	pci_set_master(pci_dev);
 
-	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
+	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
 	if (r) {
-		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+		dev_err(dev, "failed to set DMA mask (%d)\n", r);
 		return -ENODEV;
 	}
 
-	r = cio2_pci_config_setup(pci_dev);
-	if (r)
-		return -ENODEV;
+	r = pci_enable_msi(pci_dev);
+	if (r) {
+		dev_err(dev, "failed to enable MSI (%d)\n", r);
		return r;
+	}
 
 	r = cio2_fbpt_init_dummy(cio2);
 	if (r)
@@ -1794,11 +1713,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
 
 	mutex_init(&cio2->lock);
 
-	cio2->media_dev.dev = &cio2->pci_dev->dev;
+	cio2->media_dev.dev = dev;
 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
 		sizeof(cio2->media_dev.model));
-	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
-		 "PCI:%s", pci_name(cio2->pci_dev));
 	cio2->media_dev.hw_revision = 0;
 
 	media_device_init(&cio2->media_dev);
@@ -1807,10 +1724,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
 		goto fail_mutex_destroy;
 
 	cio2->v4l2_dev.mdev = &cio2->media_dev;
-	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
+	r = v4l2_device_register(dev, &cio2->v4l2_dev);
 	if (r) {
-		dev_err(&pci_dev->dev,
-			"failed to register V4L2 device (%d)\n", r);
+		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
 		goto fail_media_device_unregister;
 	}
@@ -1818,28 +1734,28 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
 	if (r)
 		goto fail_v4l2_device_unregister;
 
-	v4l2_async_notifier_init(&cio2->notifier);
+	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
+
+	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+			     CIO2_NAME, cio2);
+	if (r) {
+		dev_err(dev, "failed to request IRQ (%d)\n", r);
+		goto fail_clean_notifier;
+	}
 
 	/* Register notifier for subdevices we care */
 	r = cio2_parse_firmware(cio2);
 	if (r)
		goto fail_clean_notifier;
 
-	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
-			     IRQF_SHARED, CIO2_NAME, cio2);
-	if (r) {
-		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
-		goto fail_clean_notifier;
-	}
-
-	pm_runtime_put_noidle(&pci_dev->dev);
-	pm_runtime_allow(&pci_dev->dev);
+	pm_runtime_put_noidle(dev);
+	pm_runtime_allow(dev);
 
 	return 0;
 
 fail_clean_notifier:
-	v4l2_async_notifier_unregister(&cio2->notifier);
-	v4l2_async_notifier_cleanup(&cio2->notifier);
+	v4l2_async_nf_unregister(&cio2->notifier);
+	v4l2_async_nf_cleanup(&cio2->notifier);
 	cio2_queues_exit(cio2);
 fail_v4l2_device_unregister:
 	v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1858,13 +1774,16 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
 
 	media_device_unregister(&cio2->media_dev);
-	v4l2_async_notifier_unregister(&cio2->notifier);
-	v4l2_async_notifier_cleanup(&cio2->notifier);
+	v4l2_async_nf_unregister(&cio2->notifier);
+	v4l2_async_nf_cleanup(&cio2->notifier);
 	cio2_queues_exit(cio2);
 	cio2_fbpt_exit_dummy(cio2);
 	v4l2_device_unregister(&cio2->v4l2_dev);
 	media_device_cleanup(&cio2->media_dev);
 	mutex_destroy(&cio2->lock);
+
+	pm_runtime_forbid(&pci_dev->dev);
+	pm_runtime_get_noresume(&pci_dev->dev);
 }
 
 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
@@ -1872,16 +1791,10 @@ static int __maybe_unused cio2_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
 	void __iomem *const base = cio2->base;
-	u16 pm;
 
 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
 	dev_dbg(dev, "cio2 runtime suspend.\n");
 
-	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
-	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
-	pm |= CIO2_PMCSR_D3;
-	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
-
 	return 0;
 }
 
@@ -1890,15 +1803,10 @@ static int __maybe_unused cio2_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
 	void __iomem *const base = cio2->base;
-	u16 pm;
 
 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
 	dev_dbg(dev, "cio2 runtime resume.\n");
 
-	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
-	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
-	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
-
 	return 0;
 }
 
@@ -1987,12 +1895,19 @@ static int __maybe_unused cio2_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
 	struct cio2_queue *q = cio2->cur_queue;
+	int r;
 
 	dev_dbg(dev, "cio2 suspend\n");
 	if (!cio2->streaming)
 		return 0;
 
 	/* Stop stream */
+	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
+	if (r) {
+		dev_err(dev, "failed to stop sensor streaming\n");
+		return r;
+	}
+
 	cio2_hw_exit(cio2, q);
 	synchronize_irq(pci_dev->irq);
 
@@ -2012,23 +1927,30 @@ static int __maybe_unused cio2_resume(struct device *dev)
 {
 	struct cio2_device *cio2 = dev_get_drvdata(dev);
-	int r = 0;
 	struct cio2_queue *q = cio2->cur_queue;
+	int r;
 
 	dev_dbg(dev, "cio2 resume\n");
 	if (!cio2->streaming)
 		return 0;
 
 	/* Start stream */
-	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
+	r = pm_runtime_force_resume(dev);
 	if (r < 0) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed to set power %d\n", r);
+		dev_err(dev, "failed to set power %d\n", r);
		return r;
 	}
 
 	r = cio2_hw_init(cio2, q);
-	if (r)
+	if (r) {
 		dev_err(dev, "fail to init cio2 hw\n");
+		return r;
+	}
+
+	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
+	if (r) {
+		dev_err(dev, "fail to start sensor streaming\n");
+		cio2_hw_exit(cio2, q);
+	}
 
 	return r;
 }
@@ -2040,7 +1962,7 @@ static const struct dev_pm_ops cio2_pm_ops = {
 
 static const struct pci_device_id cio2_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
-	{ 0 }
+	{ }
 };
 
 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
@@ -2057,10 +1979,11 @@ static struct pci_driver cio2_pci_driver = {
 
 module_pci_driver(cio2_pci_driver);
 
-MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
+MODULE_AUTHOR("Tuukka Toivonen");
 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
 MODULE_AUTHOR("Jian Xu Zheng");
-MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
+MODULE_AUTHOR("Yuning Pu");
 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPU3 CIO2 driver");
+MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
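
For readers following the FBPT geometry that this diff converts from open-coded expressions to PFN_UP()/offset_in_page(), the arithmetic in cio2_fbpt_entry_init_buf() can be sanity-checked in isolation. The following is a minimal userspace sketch, not kernel code: PAGE_SIZE, PFN_UP() and offset_in_page() are local stand-ins that mirror the kernel macros under the assumption of 4 KiB pages, and the payload numbers are made up for illustration.

	/*
	 * Standalone model of the FBPT size math from
	 * cio2_fbpt_entry_init_buf(); assumes 4 KiB pages.
	 */
	#include <stdio.h>

	#define PAGE_SIZE		4096u
	#define PFN_UP(x)		(((x) + PAGE_SIZE - 1) / PAGE_SIZE)
	#define offset_in_page(x)	((x) & (PAGE_SIZE - 1))

	int main(void)
	{
		/* Hypothetical payload starting 256 bytes into its first page. */
		unsigned int length = 2 * 1024 * 1024 + 100;
		unsigned int first_page_offset = 256;
		unsigned int remaining = length + first_page_offset;
		unsigned int num_of_pages = PFN_UP(remaining);
		unsigned int tail;

		/*
		 * Pick PAGE_SIZE when the payload ends exactly on a page
		 * boundary, so last_page_available_bytes lands on
		 * PAGE_SIZE - 1 (the whole last page is DMA-accessible),
		 * matching the "?:" expression in the driver.
		 */
		tail = offset_in_page(remaining) ? offset_in_page(remaining)
						 : PAGE_SIZE;

		printf("num_of_pages=%u last_page_available_bytes=%u\n",
		       num_of_pages, tail - 1);
		return 0;
	}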
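
Similarly, the bit-scanning error decode the diff factors out into cio2_irq_log_port_errs() can be modelled outside the kernel. In this sketch for_each_set_bit() is approximated with a plain loop over the table size; the error strings are the cio2_port_errs[] entries from the diff, while the test value in main() is purely illustrative.

	/* Userspace approximation of cio2_irq_log_port_errs(). */
	#include <stdio.h>

	static const char *const port_errs[] = {
		"ECC recoverable",
		"DPHY not recoverable",
		"ECC not recoverable",
		"CRC error",
		"INTERFRAMEDATA",
		"PKT2SHORT",
		"PKT2LONG",
	};

	static void log_port_errs(unsigned int port, unsigned int status)
	{
		unsigned int i;

		/* Stand-in for for_each_set_bit(i, &status, ARRAY_SIZE(...)). */
		for (i = 0; i < sizeof(port_errs) / sizeof(port_errs[0]); i++)
			if (status & (1u << i))
				printf("port %u error %s\n", port, port_errs[i]);
	}

	int main(void)
	{
		/* Bits 0 and 3 set: "ECC recoverable" and "CRC error". */
		log_port_errs(0, 0x09);
		return 0;
	}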
