/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe
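/* Note: judging from its use in stream_enc_dma_append() and dma_post() below,
   this marker is written into card memory at the start of a pending encoder
   transfer and searched for again once the transfer completes, so the driver
   can detect (and compensate for) the firmware starting the copy at a slightly
   shifted offset; the word it overwrites is saved in pending_backup/dma_backup
   and restored afterwards. */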

static void ivtv_dma_dec_start(struct ivtv_stream *s);

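/* Maps the stream number reported by the encoder firmware in the ENC START CAP
   mailbox (data[0] in ivtv_irq_enc_start_cap() below) to the driver's stream
   types. */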
static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};

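/* Runs from the IRQ work queue when a stream uses PIO instead of DMA: the data
   for each queued sg_processing element is copied out of encoder/decoder memory
   with memcpy_fromio(), after which the "PIO complete" interrupt is raised by
   writing to register 0x44 so the normal completion path runs. */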
static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct ivtv_buffer *buf;
        struct list_head *p;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
                        s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_dma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        DEFINE_WAIT(wait);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
        case IVTV_ENC_STREAM_TYPE_MPG:
                offset = data[1];
                size = data[2];
                s->pending_pts = 0;
                break;

        case IVTV_ENC_STREAM_TYPE_YUV:
                offset = data[1];
                size = data[2];
                UVoffset = data[3];
                UVsize = data[4];
                s->pending_pts = ((u64) data[5] << 32) | data[6];
                break;

        case IVTV_ENC_STREAM_TYPE_PCM:
                offset = data[1] + 12;
                size = data[2] - 12;
                s->pending_pts = read_dec(offset - 8) |
                        ((u64)(read_dec(offset - 12)) << 32);
                if (itv->has_cx23415)
                        offset += IVTV_DECODER_OFFSET;
                break;

        case IVTV_ENC_STREAM_TYPE_VBI:
                size = itv->vbi.enc_size * itv->vbi.fpi;
                offset = read_enc(itv->vbi.enc_start - 4) + 12;
                if (offset == 12) {
                        IVTV_DEBUG_INFO("VBI offset == 0\n");
                        return -1;
                }
                s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                break;

        case IVTV_DEC_STREAM_TYPE_VBI:
                size = read_dec(itv->vbi.dec_start + 4) + 8;
                offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                s->pending_pts = 0;
                offset += IVTV_DECODER_OFFSET;
                break;
        default:
                /* shouldn't happen */
                return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                                bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) { /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}

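/* Post-process a completed PIO or DMA transfer for an encoder stream: sync the
   buffers for the CPU, verify that DMA_MAGIC_COOKIE is where we expect it (and
   adjust dma_last_offset if the firmware shifted the data), restore the word
   the cookie overwrote, flag MPG/VBI buffers for the deferred byte swap, hand
   decoder-VBI data to ivtv_process_vbi_data(), and finally move the buffers to
   q_full and wake up any readers. */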
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                        s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0 && ivtv_use_dma(s)) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
                        {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                            s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each(p, &s->q_dma.list) {
                        buf = list_entry(p, struct ivtv_buffer, list);

                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}

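/* Build the scatter-gather list for a host-to-decoder transfer (MPEG data or a
   YUV frame). For YUV the destination switches to the UV plane once y_size
   bytes of luma have been written. The DMA is started immediately unless
   another transfer is already in flight, in which case it is flagged as
   pending. */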
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
                        offset = uv_offset;
                        y_done = 1;
                }
                s->sg_pending[idx].src = buf->dma_handle;
                s->sg_pending[idx].dst = offset;
                s->sg_pending[idx].size = buf->bytesused;

                offset += buf->bytesused;
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

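/* These two helpers feed one sg_processing element at a time into the hardware
   descriptor (s->sg_dma) and kick the transfer via IVTV_REG_DMAXFER (bit 0x02
   for encoder DMA, 0x01 for decoder DMA). Setting bit 31 of the size
   presumably marks the element as the last/valid descriptor in the chain; that
   interpretation of the bit is not confirmed by documentation here. */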
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

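        /* Note: the extra 256 bytes tacked onto the last scatter-gather element
           here (and onto the appended VBI transfer below) appear to make the
           card copy slightly past the nominal end of the data; dma_post() only
           scans the first 256 bytes of the result for DMA_MAGIC_COOKIE, so this
           looks like slack for the firmware starting the transfer at a shifted
           offset. This is an interpretation of the code, not documented
           firmware behaviour. */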
        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of a
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure we only use the MPEG DMA to transfer the VBI DMA if both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
            s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                        read_reg(IVTV_REG_DMASTATUS),
                                        s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                /* Too many retries, give up on this frame */
                                itv->dma_retries = 0;
                                s->sg_processed = s->sg_processing_size;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* For some reason must kick the firmware, like PIO mode,
                   I think this tells the firmware we are done and the size
                   of the xfer so it can calculate what we need next.
                   I think we can do this part ourselves but would have to
                   fully calculate xfer info ourselves and not use interrupts
                 */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                                s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        /* Too many retries, give up on this frame */
                        itv->dma_retries = 0;
                        s->sg_processed = s->sg_processing_size;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                        read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                                data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct list_head *p;
                list_for_each(p, &s->q_predma.list) {
                        struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
            !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
            !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

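/* The decoder asks for more data (YUV or MPEG): determine how much it wants
   and for which stream, then either mark the stream as needing data or queue a
   host-to-card transfer of the requested size via ivtv_dma_stream_dec_prepare(). */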
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                        itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
             ((itv->last_vsync_field & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
            (frame != (itv->last_vsync_field & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->last_vsync_field & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->last_vsync_field += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->eos_waitq);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

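        /* Kick off any pending stream DMA/PIO. itv->irq_rr_idx rotates the
           starting point of the scan, so that when several streams have work
           pending they are serviced round-robin rather than always in the same
           order. */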
        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

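/* Presumably registered elsewhere in the driver as the handler for
   itv->dma_timer (armed for 100 ms whenever an encoder or decoder DMA is
   started above): if the DMA-in-progress flag is still set when it fires, the
   transfer is assumed to have stalled, so the status register is cleared and
   the DMA state reset so waiting threads can continue. */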
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}