/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  cx18 driver PCI memory mapped IO access routines
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 */

#ifndef CX18_IO_H
#define CX18_IO_H

#include "cx18-driver.h"

/*
 * Readback and retry of MMIO access for reliability:
 * The concept was suggested by Steve Toth <stoth@linuxtv.org>.
 * The implementation is the fault of Andy Walls <awalls@md.metrocast.net>.
 *
 * *write* functions retry the MMIO write, verifying it by readback, unless
 * suffixed with _noretry
 * *read* functions never retry the MMIO read (retrying a read never helps)
 */

/* Non-byteswapping memory mapped IO (__raw_* accessors; no endianness conversion) */
static inline u32 cx18_raw_readl(struct cx18 *cx, const void __iomem *addr)
{
	return __raw_readl(addr);
}

static inline
void cx18_raw_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
{
	__raw_writel(val, addr);
}

static inline void cx18_raw_writel(struct cx18 *cx, u32 val, void __iomem *addr)
{
	int i;
	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
		cx18_raw_writel_noretry(cx, val, addr);
		if (val == cx18_raw_readl(cx, addr))
			break;
	}
}

/* Normal memory mapped IO (readl/writel; byteswapped as needed for little-endian device order) */
static inline u32 cx18_readl(struct cx18 *cx, const void __iomem *addr)
{
	return readl(addr);
}

static inline
void cx18_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
{
	writel(val, addr);
}

static inline void cx18_writel(struct cx18 *cx, u32 val, void __iomem *addr)
{
	int i;
	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
		cx18_writel_noretry(cx, val, addr);
		if (val == cx18_readl(cx, addr))
			break;
	}
}

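/*
 * Write val to addr, then verify that the readback, masked with 'mask',
 * matches 'eval'; the write is retried up to CX18_MAX_MMIO_WR_RETRIES times.
 * A readback of 0xffffffff (when 0xffffffff is not the expected value)
 * usually indicates the PCI read itself failed, so the write is retried.
 * This variant is for registers whose readback legitimately differs from
 * the value written (e.g. write-1-to-clear status bits).
 */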
static inline
void cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
			u32 eval, u32 mask)
{
	int i;
	u32 r;
	eval &= mask;
	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
		cx18_writel_noretry(cx, val, addr);
		r = cx18_readl(cx, addr);
		if (r == 0xffffffff && eval != 0xffffffff)
			continue;
		if (eval == (r & mask))
			break;
	}
}
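
/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * how a caller might choose among the 32-bit write flavors above.  The
 * function name, address and values are hypothetical.
 */
static inline void cx18_writel_usage_sketch(struct cx18 *cx, u32 val,
					    void __iomem *addr)
{
	/* Verified write: re-issued until the readback equals val */
	cx18_writel(cx, val, addr);

	/* Single write, no readback verification */
	cx18_writel_noretry(cx, val, addr);

	/*
	 * Write-1-to-clear style register: the readback is expected to
	 * show the written bits cleared, so verify the masked readback
	 * against an expected value (0 here) instead of against val.
	 */
	cx18_writel_expect(cx, val, addr, 0, val);
}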

static inline u16 cx18_readw(struct cx18 *cx, const void __iomem *addr)
{
	return readw(addr);
}

static inline
void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr)
{
	writew(val, addr);
}

static inline void cx18_writew(struct cx18 *cx, u16 val, void __iomem *addr)
{
	int i;
	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
		cx18_writew_noretry(cx, val, addr);
		if (val == cx18_readw(cx, addr))
			break;
	}
}

static inline u8 cx18_readb(struct cx18 *cx, const void __iomem *addr)
{
	return readb(addr);
}

static inline
void cx18_writeb_noretry(struct cx18 *cx, u8 val, void __iomem *addr)
{
	writeb(val, addr);
}

static inline void cx18_writeb(struct cx18 *cx, u8 val, void __iomem *addr)
{
	int i;
	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
		cx18_writeb_noretry(cx, val, addr);
		if (val == cx18_readb(cx, addr))
			break;
	}
}

static inline
void cx18_memcpy_fromio(struct cx18 *cx, void *to,
			const void __iomem *from, unsigned int len)
{
	memcpy_fromio(to, from, len);
}

void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count);


/* Access "register" region of CX23418 memory mapped I/O */
static inline void cx18_write_reg_noretry(struct cx18 *cx, u32 val, u32 reg)
{
	cx18_writel_noretry(cx, val, cx->reg_mem + reg);
}

static inline void cx18_write_reg(struct cx18 *cx, u32 val, u32 reg)
{
	cx18_writel(cx, val, cx->reg_mem + reg);
}

static inline void cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
					 u32 eval, u32 mask)
{
	cx18_writel_expect(cx, val, cx->reg_mem + reg, eval, mask);
}

static inline u32 cx18_read_reg(struct cx18 *cx, u32 reg)
{
	return cx18_readl(cx, cx->reg_mem + reg);
}


/* Access "encoder memory" region of CX23418 memory mapped I/O */
static inline void cx18_write_enc(struct cx18 *cx, u32 val, u32 addr)
{
	cx18_writel(cx, val, cx->enc_mem + addr);
}

static inline u32 cx18_read_enc(struct cx18 *cx, u32 addr)
{
	return cx18_readl(cx, cx->enc_mem + addr);
}
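
/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * pulling data out of the encoder memory window using the helpers above.
 * The offset, length and destination buffer are hypothetical.
 */
static inline u32 cx18_read_enc_sketch(struct cx18 *cx, u32 offset,
				       void *buf, unsigned int len)
{
	/* Copy a block of encoder memory into a driver buffer */
	cx18_memcpy_fromio(cx, buf, cx->enc_mem + offset, len);

	/* Read a single 32-bit word at the same offset */
	return cx18_read_enc(cx, offset);
}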

void cx18_sw1_irq_enable(struct cx18 *cx, u32 val);
void cx18_sw1_irq_disable(struct cx18 *cx, u32 val);
void cx18_sw2_irq_enable(struct cx18 *cx, u32 val);
void cx18_sw2_irq_disable(struct cx18 *cx, u32 val);
void cx18_sw2_irq_disable_cpu(struct cx18 *cx, u32 val);
void cx18_setup_page(struct cx18 *cx, u32 addr);

#endif /* CX18_IO_H */