// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-480.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v480 (SM8250)
*
* Copyright (C) 2020-2021 Linaro Ltd.
* Copyright (C) 2021 Jonathan Marek
*/

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#include "camss.h"
#include "camss-vfe.h"

#define VFE_GLOBAL_RESET_CMD (vfe_is_lite(vfe) ? 0x0c : 0x1c)
#define GLOBAL_RESET_HW_AND_REG (vfe_is_lite(vfe) ? BIT(1) : BIT(0))
#define VFE_REG_UPDATE_CMD (vfe_is_lite(vfe) ? 0x20 : 0x34)
static inline int reg_update_rdi(struct vfe_device *vfe, int n)
{
	return vfe_is_lite(vfe) ? BIT(n) : BIT(1 + (n));
}
#define REG_UPDATE_RDI reg_update_rdi
#define VFE_IRQ_CMD (vfe_is_lite(vfe) ? 0x24 : 0x38)
#define IRQ_CMD_GLOBAL_CLEAR BIT(0)
#define VFE_IRQ_MASK(n) ((vfe_is_lite(vfe) ? 0x28 : 0x3c) + (n) * 4)
#define IRQ_MASK_0_RESET_ACK (vfe_is_lite(vfe) ? BIT(17) : BIT(0))
#define IRQ_MASK_0_BUS_TOP_IRQ (vfe_is_lite(vfe) ? BIT(4) : BIT(7))
#define VFE_IRQ_CLEAR(n) ((vfe_is_lite(vfe) ? 0x34 : 0x48) + (n) * 4)
#define VFE_IRQ_STATUS(n) ((vfe_is_lite(vfe) ? 0x40 : 0x54) + (n) * 4)
#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x1a00 : 0xaa00)
#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
#define WM_CGC_OVERRIDE_ALL (0x3FFFFFF)
#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xdc)
#define VFE_BUS_IRQ_MASK(n) (BUS_REG_BASE + 0x18 + (n) * 4)
static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n)
{
	return vfe_is_lite(vfe) ? BIT(n) : BIT(3 + (n));
}
#define BUS_IRQ_MASK_0_RDI_RUP bus_irq_mask_0_rdi_rup
static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
{
	return vfe_is_lite(vfe) ? BIT(4 + (n)) : BIT(6 + (n));
}
#define BUS_IRQ_MASK_0_COMP_DONE bus_irq_mask_0_comp_done
#define VFE_BUS_IRQ_CLEAR(n) (BUS_REG_BASE + 0x20 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n) (BUS_REG_BASE + 0x28 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR_GLOBAL (BUS_REG_BASE + 0x30)
#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
#define WM_CFG_EN (0)
#define WM_CFG_MODE (16)
#define MODE_QCOM_PLAIN (0)
#define MODE_MIPI_RAW (1)
#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100)
#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
#define VFE_BUS_WM_IMAGE_CFG_1(n) (BUS_REG_BASE + 0x210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_ADDR(n) (BUS_REG_BASE + 0x220 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_INCR(n) (BUS_REG_BASE + 0x224 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n) (BUS_REG_BASE + 0x228 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100)
#define VFE_BUS_WM_SYSTEM_CACHE_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)

/* For Titan 480, each bus client is hardcoded to a specific path
 * and each bus client is part of a hardcoded "comp group".
 */
#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0 : 23) + (n))
#define RDI_COMP_GROUP(n) ((vfe_is_lite(vfe) ? 0 : 11) + (n))
#define MAX_VFE_OUTPUT_LINES 4
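
/*
 * vfe_global_reset - Reset the VFE hardware and registers
 * @vfe: VFE device
 *
 * The reset ack IRQ is unmasked first; completion of the reset is reported
 * through IRQ_MASK_0_RESET_ACK and handled by vfe_isr_reset_ack().
 */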
static void vfe_global_reset(struct vfe_device *vfe)
{
	writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
	writel_relaxed(GLOBAL_RESET_HW_AND_REG, vfe->base + VFE_GLOBAL_RESET_CMD);
}
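
/*
 * vfe_wm_start - Configure and start a write master for an RDI line
 * @vfe: VFE device
 * @wm: RDI index, translated to the hardware write master by RDI_WM()
 * @line: VFE line providing the active output format
 *
 * Frame stride and size are taken from the line's active pixel format and
 * the write master is enabled in MIPI RAW mode with one IRQ per frame.
 */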
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
	struct v4l2_pix_format_mplane *pix =
		&line->video_out.active_fmt.fmt.pix_mp;

	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */

	/* no clock gating at bus input */
	writel_relaxed(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);

	writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);

	writel_relaxed(pix->plane_fmt[0].bytesperline * pix->height,
		       vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
	writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
	writel_relaxed(WM_IMAGE_CFG_0_DEFAULT_WIDTH,
		       vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
	writel_relaxed(pix->plane_fmt[0].bytesperline,
		       vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
	writel_relaxed(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));

	/* no dropped frames, one irq per frame */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
	writel_relaxed(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
	writel_relaxed(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
	writel_relaxed(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));

	writel_relaxed(1 << WM_CFG_EN | MODE_MIPI_RAW << WM_CFG_MODE,
		       vfe->base + VFE_BUS_WM_CFG(wm));
}
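
/*
 * vfe_wm_stop - Disable a write master
 * @vfe: VFE device
 * @wm: RDI index, translated to the hardware write master by RDI_WM()
 */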
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}
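
/*
 * vfe_wm_update - Set the image buffer address for a write master
 * @vfe: VFE device
 * @wm: RDI index, translated to the hardware write master by RDI_WM()
 * @addr: address of the buffer the next frame is written to
 * @line: VFE line
 */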
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
			  struct vfe_line *line)
{
	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */
	writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
}
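
/*
 * vfe_reg_update - Request a register update for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 *
 * Accumulate the line's RDI bit in vfe->reg_update and write the mask to the
 * REG_UPDATE command register; completion is signalled by the RDI RUP bus IRQ
 * handled in vfe_isr_reg_update().
 */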
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	vfe->reg_update |= REG_UPDATE_RDI(vfe, line_id);
	writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
}
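
/*
 * vfe_reg_update_clear - Clear a pending register update for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 */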
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
					enum vfe_line_id line_id)
{
	vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
}
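
/*
 * vfe_enable_irq - Enable top-level and bus IRQs for the active lines
 * @vfe: VFE device
 */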
static void vfe_enable_irq(struct vfe_device *vfe)
{
	int i;
	u32 bus_irq_mask = 0;

	if (!vfe->stream_count)
		/* enable reset ack IRQ and top BUS status IRQ */
		writel(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
		       vfe->base + VFE_IRQ_MASK(0));

	for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
		/* Enable IRQs for newly added lines, but also keep already running lines' IRQs */
		if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
		    vfe->line[i].output.state == VFE_OUTPUT_ON) {
			bus_irq_mask |= BUS_IRQ_MASK_0_RDI_RUP(vfe, i)
				| BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i));
		}
	}

	writel(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
}

static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);

/*
 * vfe_isr - VFE module interrupt handler
 * @irq: Interrupt line
 * @dev: VFE device
 *
 * Return IRQ_HANDLED on success
 */
static irqreturn_t vfe_isr(int irq, void *dev)
{
	struct vfe_device *vfe = dev;
	u32 status;
	int i;

	status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0));
	writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0));
	writel_relaxed(IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);

	if (status & IRQ_MASK_0_RESET_ACK)
		vfe_isr_reset_ack(vfe);

	if (status & IRQ_MASK_0_BUS_TOP_IRQ) {
		u32 status = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(0));

		writel_relaxed(status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
		writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);

		/* Loop through all WM IRQs */
		for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
			if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i))
				vfe_isr_reg_update(vfe, i);

			if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
				vfe_buf_done(vfe, i);
		}
	}

	return IRQ_HANDLED;
}

/*
 * vfe_halt - Trigger halt on VFE module and wait to complete
 * @vfe: VFE device
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_halt(struct vfe_device *vfe)
{
	/* rely on vfe_disable_output() to stop the VFE */
	return 0;
}

/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe_reg_update_clear(vfe, line_id);

	output = &vfe->line[line_id].output;

	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}
static const struct camss_video_ops vfe_video_ops_480 = {
	.queue_buffer = vfe_queue_buffer_v2,
	.flush_buffers = vfe_flush_buffers,
};
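
/*
 * vfe_subdev_init - Version specific initialization of the VFE device
 * @dev: Device
 * @vfe: VFE device
 */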
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
	vfe->video_ops = vfe_video_ops_480;
}
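
/* The following vfe_hw_ops callbacks have nothing to do on this VFE version */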
static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
	/* nop */
}

static void vfe_violation_read(struct vfe_device *vfe)
{
	/* nop */
}

static void vfe_buf_done_480(struct vfe_device *vfe, int port_id)
{
	/* nop */
}

const struct vfe_hw_ops vfe_ops_480 = {
	.enable_irq = vfe_enable_irq,
	.global_reset = vfe_global_reset,
	.hw_version = vfe_hw_version,
	.isr = vfe_isr,
	.isr_read = vfe_isr_read,
	.reg_update = vfe_reg_update,
	.reg_update_clear = vfe_reg_update_clear,
	.pm_domain_off = vfe_pm_domain_off,
	.pm_domain_on = vfe_pm_domain_on,
	.subdev_init = vfe_subdev_init,
	.vfe_disable = vfe_disable,
	.vfe_enable = vfe_enable_v2,
	.vfe_halt = vfe_halt,
	.violation_read = vfe_violation_read,
	.vfe_wm_start = vfe_wm_start,
	.vfe_wm_stop = vfe_wm_stop,
	.vfe_buf_done = vfe_buf_done_480,
	.vfe_wm_update = vfe_wm_update,
};