2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
/* Module metadata and load-time parameters. */
35 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
36 MODULE_AUTHOR("Steven Toth <stoth@hauppauge.com>");
37 MODULE_LICENSE("GPL");
/* debug: 0 = quiet; larger values enable more dprintk() output (0644 => runtime-writable via sysfs). */
39 static unsigned int debug = 0;
40 module_param(debug,int,0644);
41 MODULE_PARM_DESC(debug,"enable debug messages");
/* card[]: per-device board-type override; UNSET requests autodetection from PCI subsystem IDs (see cx23885_dev_setup). */
43 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
44 module_param_array(card, int, NULL, 0444);
45 MODULE_PARM_DESC(card,"card type");
/*
 * dprintk(level, fmt, ...): emit a KERN_DEBUG message prefixed with the
 * device name when the module "debug" parameter is >= level.
 *
 * Requires a variable "dev" (struct cx23885_dev *) in the calling scope.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement:
 * the original bare "if" form is a dangling-else hazard — used as the body
 * of an un-braced if/else in a caller, the caller's "else" would silently
 * bind to this macro's "if".
 */
#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg); \
	} while (0)
/* Number of cx23885 devices probed so far; also indexes the card[] module parameter. */
50 static unsigned int cx23885_devcount;
/* devlist mutex guards cx23885_devlist, the global list of active devices. */
52 static DEFINE_MUTEX(devlist);
53 static LIST_HEAD(cx23885_devlist);
/* Sentinel passed to cx23885_risc_field() meaning "emit no RISC_RESYNC instruction". */
55 #define NO_SYNC_LINE (-1U)
59 * 1 line = 16 bytes of CDT
61 * cdt size = 16 * linesize
66 * 0x00000000 0x00008fff FIFO clusters
67 * 0x00010000 0x000104af Channel Management Data Structures
68 * 0x000104b0 0x000104ff Free
69 * 0x00010500 0x000108bf 15 channels * iqsize
70 * 0x000108c0 0x000108ff Free
71 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
72 * 15 channels * (iqsize + (maxlines * linesize))
73 * 0x00010ea0 0x00010xxx Free
/*
 * SRAM channel map for the CX23885 bridge: per-DMA-channel addresses of the
 * command (CMDS), control (IQ/ctrl) and FIFO regions in chip SRAM, plus the
 * DMA PTR1/PTR2/CNT1/CNT2 register pairs for each channel.
 * NOTE(review): several initializer lines (.name, .fifo_start/.fifo_size,
 * entry braces) are not visible in this view — confirm against full source.
 */
76 struct sram_channel cx23885_sram_channels[] = {
79 .cmds_start = 0x10000,
80 .ctrl_start = 0x10500,
84 .ptr1_reg = DMA1_PTR1,
85 .ptr2_reg = DMA1_PTR2,
86 .cnt1_reg = DMA1_CNT1,
87 .cnt2_reg = DMA1_CNT2,
97 .ptr1_reg = DMA2_PTR1,
98 .ptr2_reg = DMA2_PTR2,
99 .cnt1_reg = DMA2_CNT1,
100 .cnt2_reg = DMA2_CNT2,
109 .ptr1_reg = DMA3_PTR1,
110 .ptr2_reg = DMA3_PTR2,
111 .cnt1_reg = DMA3_CNT1,
112 .cnt2_reg = DMA3_CNT2,
121 .ptr1_reg = DMA4_PTR1,
122 .ptr2_reg = DMA4_PTR2,
123 .cnt1_reg = DMA4_CNT1,
124 .cnt2_reg = DMA4_CNT2,
133 .ptr1_reg = DMA5_PTR1,
134 .ptr2_reg = DMA5_PTR2,
135 .cnt1_reg = DMA5_CNT1,
136 .cnt2_reg = DMA5_CNT2,
/* Second entry using the DMA5 register set — presumably the TS1/TS2 split; confirm. */
140 .cmds_start = 0x10140,
141 .ctrl_start = 0x10680,
143 .fifo_start = 0x6000,
145 .ptr1_reg = DMA5_PTR1,
146 .ptr2_reg = DMA5_PTR2,
147 .cnt1_reg = DMA5_CNT1,
148 .cnt2_reg = DMA5_CNT2,
157 .ptr1_reg = DMA6_PTR1,
158 .ptr2_reg = DMA6_PTR2,
159 .cnt1_reg = DMA6_CNT1,
160 .cnt2_reg = DMA6_CNT2,
169 .ptr1_reg = DMA7_PTR1,
170 .ptr2_reg = DMA7_PTR2,
171 .cnt1_reg = DMA7_CNT1,
172 .cnt2_reg = DMA7_CNT2,
181 .ptr1_reg = DMA8_PTR1,
182 .ptr2_reg = DMA8_PTR2,
183 .cnt1_reg = DMA8_CNT1,
184 .cnt2_reg = DMA8_CNT2,
188 /* FIXME, these allocations will change when
189 * analog arrives. To be reviewed.
190 * CX23887 Assumptions
191 * 1 line = 16 bytes of CDT
193 * cdt size = 16 * linesize
198 * 0x00000000 0x00008fff FIFO clusters
199 * 0x00010000 0x000104af Channel Management Data Structures
200 * 0x000104b0 0x000104ff Free
201 * 0x00010500 0x000108bf 15 channels * iqsize
202 * 0x000108c0 0x000108ff Free
203 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
204 * 15 channels * (iqsize + (maxlines * linesize))
205 * 0x00010ea0 0x00010xxx Free
/*
 * SRAM channel map for the CX23887 bridge variant. Register pairs mirror the
 * CX23885 table above; the SRAM layout differs per the comment block above.
 * NOTE(review): several initializer lines (.name, .cmds_start/.ctrl_start of
 * early entries, entry braces) are not visible in this view — confirm.
 */
208 struct sram_channel cx23887_sram_channels[] = {
216 .ptr1_reg = DMA1_PTR1,
217 .ptr2_reg = DMA1_PTR2,
218 .cnt1_reg = DMA1_CNT1,
219 .cnt2_reg = DMA1_CNT2,
228 .ptr1_reg = DMA2_PTR1,
229 .ptr2_reg = DMA2_PTR2,
230 .cnt1_reg = DMA2_CNT1,
231 .cnt2_reg = DMA2_CNT2,
240 .ptr1_reg = DMA3_PTR1,
241 .ptr2_reg = DMA3_PTR2,
242 .cnt1_reg = DMA3_CNT1,
243 .cnt2_reg = DMA3_CNT2,
252 .ptr1_reg = DMA4_PTR1,
253 .ptr2_reg = DMA4_PTR2,
254 .cnt1_reg = DMA4_CNT1,
255 .cnt2_reg = DMA4_CNT2,
264 .ptr1_reg = DMA5_PTR1,
265 .ptr2_reg = DMA5_PTR2,
266 .cnt1_reg = DMA5_CNT1,
267 .cnt2_reg = DMA5_CNT2,
/* Second entry on the DMA5 register set, same pattern as the 885 table. */
271 .cmds_start = 0x10140,
272 .ctrl_start = 0x10680,
274 .fifo_start = 0x6000,
276 .ptr1_reg = DMA5_PTR1,
277 .ptr2_reg = DMA5_PTR2,
278 .cnt1_reg = DMA5_CNT1,
279 .cnt2_reg = DMA5_CNT2,
288 .ptr1_reg = DMA6_PTR1,
289 .ptr2_reg = DMA6_PTR2,
290 .cnt1_reg = DMA6_CNT1,
291 .cnt2_reg = DMA6_CNT2,
300 .ptr1_reg = DMA7_PTR1,
301 .ptr2_reg = DMA7_PTR2,
302 .cnt1_reg = DMA7_CNT1,
303 .cnt2_reg = DMA7_CNT2,
312 .ptr1_reg = DMA8_PTR1,
313 .ptr2_reg = DMA8_PTR2,
314 .cnt1_reg = DMA8_CNT1,
315 .cnt2_reg = DMA8_CNT2,
/*
 * Decode one RISC instruction word to the kernel log (opcode mnemonic,
 * flag bits 12..27, 12-bit count) and return the number of dwords the
 * instruction occupies (so callers can step through an IQ/program).
 */
319 static int cx23885_risc_decode(u32 risc)
/* Opcode mnemonics, indexed by the top 4 bits of the instruction. */
321 static char *instr[16] = {
322 [ RISC_SYNC >> 28 ] = "sync",
323 [ RISC_WRITE >> 28 ] = "write",
324 [ RISC_WRITEC >> 28 ] = "writec",
325 [ RISC_READ >> 28 ] = "read",
326 [ RISC_READC >> 28 ] = "readc",
327 [ RISC_JUMP >> 28 ] = "jump",
328 [ RISC_SKIP >> 28 ] = "skip",
329 [ RISC_WRITERM >> 28 ] = "writerm",
330 [ RISC_WRITECM >> 28 ] = "writecm",
331 [ RISC_WRITECR >> 28 ] = "writecr",
/* Instruction length in dwords per opcode; unlisted opcodes default to 0 (reported as 1 below). */
333 static int incr[16] = {
334 [ RISC_WRITE >> 28 ] = 3, // 2
335 [ RISC_JUMP >> 28 ] = 3, // 2
336 [ RISC_SKIP >> 28 ] = 1,
337 [ RISC_SYNC >> 28 ] = 1,
338 [ RISC_WRITERM >> 28 ] = 3,
339 [ RISC_WRITECM >> 28 ] = 3,
340 [ RISC_WRITECR >> 28 ] = 4,
/* Names of flag bits 12..27, printed high-to-low when set. */
342 static char *bits[] = {
343 "12", "13", "14", "resync",
344 "cnt0", "cnt1", "18", "19",
345 "20", "21", "22", "23",
346 "irq1", "irq2", "eol", "sol",
350 printk("0x%08x [ %s", risc,
351 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
352 for (i = ARRAY_SIZE(bits)-1; i >= 0; i--)
353 if (risc & (1 << (i + 12)))
354 printk(" %s",bits[i]);
355 printk(" count=%d ]\n", risc & 0xfff);
/* Unknown opcodes advance by 1 dword so the disassembly loop still terminates. */
356 return incr[risc >> 28] ? incr[risc >> 28] : 1;
/*
 * Complete finished buffers on a TS port's active queue: every buffer whose
 * sequence number has been passed by the hardware counter "count" is marked
 * STATE_DONE, timestamped, removed from the queue and its waiters woken.
 * The queue timeout timer is deleted (queue empty) or re-armed otherwise.
 */
359 void cx23885_wakeup(struct cx23885_tsport *port,
360 struct cx23885_dmaqueue *q, u32 count)
362 struct cx23885_dev *dev = port->dev;
363 struct cx23885_buffer *buf;
366 for (bc = 0;; bc++) {
367 if (list_empty(&q->active))
369 buf = list_entry(q->active.next,
370 struct cx23885_buffer, vb.queue);
371 /* count comes from the hw and is 16bit wide --
372 * this trick handles wrap-arounds correctly for
373 * up to 32767 buffers in flight... */
374 if ((s16) (count - buf->count) < 0)
376 do_gettimeofday(&buf->vb.ts);
377 dprintk(2,"[%p/%d] wakeup reg=%d buf=%d\n",buf,buf->vb.i,
379 buf->vb.state = STATE_DONE;
380 list_del(&buf->vb.queue);
381 wake_up(&buf->vb.done);
383 if (list_empty(&q->active)) {
384 del_timer(&q->timeout);
386 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
/* Normally exactly one buffer completes per IRQ; more indicates missed interrupts. */
389 printk("%s: %d buffers handled (should be 1)\n",__FUNCTION__,bc);
/* Forward declaration; definition below. */
391 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
392 struct sram_channel *ch);
/*
 * Program one SRAM channel: build the Cluster Descriptor Table (CDT) for the
 * channel FIFO, initialize the CMDS block (initial RISC PC, CDT address/size,
 * IQ base/size) and load the DMA pointer/count registers.
 * bpl is rounded up to an 8-byte multiple; "risc" is the initial program
 * counter (0 erases/parks the channel with a self-jump at SRAM address 8).
 */
394 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
395 struct sram_channel *ch,
396 unsigned int bpl, u32 risc)
398 unsigned int i,lines;
/* Channels without a CMDS block are unused placeholders — nothing to program. */
401 if (ch->cmds_start == 0)
403 dprintk(1, "%s() Erasing channel [%s]\n",__FUNCTION__, ch->name);
404 cx_write(ch->ptr1_reg, 0);
405 cx_write(ch->ptr2_reg, 0);
406 cx_write(ch->cnt2_reg, 0);
407 cx_write(ch->cnt1_reg, 0);
410 dprintk(1, "%s() Configuring channel [%s]\n",__FUNCTION__, ch->name);
413 bpl = (bpl + 7) & ~7; /* alignment */
415 lines = ch->fifo_size / bpl;
/* Park program at SRAM address 8: jump-to-self with IRQ1 + counter increment. */
420 cx_write(8+0, cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC) );
421 cx_write(8+4, cpu_to_le32(8) );
422 cx_write(8+8, cpu_to_le32(0) );
/* Write the CDT: one 16-byte descriptor per FIFO line. */
425 for (i = 0; i < lines; i++) {
426 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __FUNCTION__, cdt + 16*i, ch->fifo_start + bpl*i);
427 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
428 cx_write(cdt + 16*i + 4, 0);
429 cx_write(cdt + 16*i + 8, 0);
430 cx_write(cdt + 16*i + 12, 0);
/* CMDS block: initial PC (park address 8 or caller-supplied risc), CDT base/size, IQ base/size. */
435 cx_write(ch->cmds_start + 0, 8);
437 cx_write(ch->cmds_start + 0, risc);
438 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
439 cx_write(ch->cmds_start + 8, cdt);
440 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
441 cx_write(ch->cmds_start + 16, ch->ctrl_start);
443 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2) );
445 cx_write(ch->cmds_start + 20, 64 >> 2);
446 for (i = 24; i < 80; i += 4)
447 cx_write(ch->cmds_start + i, 0);
/* Load DMA registers: pointers to FIFO/CDT, counts in qwords. */
450 cx_write(ch->ptr1_reg, ch->fifo_start);
451 cx_write(ch->ptr2_reg, cdt);
452 cx_write(ch->cnt2_reg, (lines*16) >> 3);
453 cx_write(ch->cnt1_reg, (bpl >> 3) -1);
455 dprintk(2,"[bridged %d] sram setup %s: bpl=%d lines=%d\n",
456 cx23885_boards[dev->board].bridge,
/*
 * Debug helper: dump one SRAM channel's CMDS block, decode the four RISC
 * words embedded there and the first 64 bytes of the instruction queue,
 * then print the FIFO/ctrl ranges and current DMA pointer/count registers.
 */
464 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
465 struct sram_channel *ch)
467 static char *name[] = {
486 printk("%s: %s - dma channel status dump\n",
487 dev->name, ch->name);
/* One line per named CMDS field. */
488 for (i = 0; i < ARRAY_SIZE(name); i++)
489 printk("%s: cmds: %-15s: 0x%08x\n",
491 cx_read(ch->cmds_start + 4*i));
/* Decode the RISC words stored at CMDS offsets 14..17. */
493 for (i = 0; i < 4; i++) {
494 risc = cx_read(ch->cmds_start + 4 * (i+14));
495 printk("%s: risc%d: ", dev->name, i);
496 cx23885_risc_decode(risc);
/* Walk the instruction queue; n = dwords consumed by each decoded instruction. */
498 for (i = 0; i < (64 >> 2); i += n) {
499 risc = cx_read(ch->ctrl_start + 4 * i); /* No consideration for bits 63-32 */
500 printk("%s: (0x%08x) iq %x: ", dev->name, ch->ctrl_start + 4 * i, i);
501 n = cx23885_risc_decode(risc);
502 for (j = 1; j < n; j++) {
503 risc = cx_read(ch->ctrl_start + 4 * (i+j));
504 printk("%s: iq %x: 0x%08x [ arg #%d ]\n",
505 dev->name, i+j, risc, j);
509 printk("%s: fifo: 0x%08x -> 0x%x\n",
510 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
511 printk("%s: ctrl: 0x%08x -> 0x%x\n",
512 dev->name, ch->ctrl_start, ch->ctrl_start+6*16);
513 printk("%s: ptr1_reg: 0x%08x\n",
514 dev->name, cx_read(ch->ptr1_reg));
515 printk("%s: ptr2_reg: 0x%08x\n",
516 dev->name, cx_read(ch->ptr2_reg));
517 printk("%s: cnt1_reg: 0x%08x\n",
518 dev->name, cx_read(ch->cnt1_reg));
519 printk("%s: cnt2_reg: 0x%08x\n",
520 dev->name, cx_read(ch->cnt2_reg));
/*
 * Debug helper: disassemble a host-resident RISC program (btcx_riscmem) to
 * the kernel log, one instruction per line, stopping at the first RISC_JUMP.
 */
523 void cx23885_risc_disasm(struct cx23885_tsport *port, struct btcx_riscmem *risc)
525 struct cx23885_dev *dev = port->dev;
528 printk("%s: risc disasm: %p [dma=0x%08lx]\n",
529 dev->name, risc->cpu, (unsigned long)risc->dma);
530 for (i = 0; i < (risc->size >> 2); i += n) {
531 printk("%s: %04d: ", dev->name, i);
/* n = dwords consumed by this instruction (from cx23885_risc_decode). */
532 n = cx23885_risc_decode(risc->cpu[i]);
533 for (j = 1; j < n; j++)
534 printk("%s: %04d: 0x%08x [ arg #%d ]\n",
535 dev->name, i+j, risc->cpu[i+j], j);
/* A bare jump marks the end of the program. */
536 if (risc->cpu[i] == RISC_JUMP)
/*
 * Quiesce the whole chip: stop the RISC controller, all IR/video/audio DMA
 * engines, the UART, and mask every interrupt source. Safe to call before
 * reset or during teardown.
 */
541 void cx23885_shutdown(struct cx23885_dev *dev)
543 /* disable RISC controller */
544 cx_write(DEV_CNTRL2, 0);
546 /* Disable all IR activity */
547 cx_write(IR_CNTRL_REG, 0);
549 /* Disable Video A/B activity */
550 cx_write(VID_A_DMA_CTL, 0);
551 cx_write(VID_B_DMA_CTL, 0);
552 cx_write(VID_C_DMA_CTL, 0);
554 /* Disable Audio activity */
555 cx_write(AUD_INT_DMA_CTL, 0);
556 cx_write(AUD_EXT_DMA_CTL, 0);
558 /* Disable Serial port */
559 cx_write(UART_CTL, 0);
561 /* Disable Interrupts */
562 cx_write(PCI_INT_MSK, 0);
563 cx_write(VID_A_INT_MSK, 0);
564 cx_write(VID_B_INT_MSK, 0);
565 cx_write(VID_C_INT_MSK, 0);
566 cx_write(AUDIO_INT_INT_MSK, 0);
567 cx_write(AUDIO_EXT_INT_MSK, 0);
/*
 * Bring the bridge to a known state: shut everything down, acknowledge all
 * pending interrupt status, then (re)program every SRAM channel for the
 * detected bridge type (885 vs 887). TS channels get 188*4-byte lines,
 * the others 128-byte lines; risc=0 parks each channel.
 */
571 void cx23885_reset(struct cx23885_dev *dev)
573 dprintk(1, "%s()\n", __FUNCTION__);
575 cx23885_shutdown(dev);
/* Ack any latched interrupt status across all blocks. */
577 cx_write(PCI_INT_STAT, 0xffffffff);
578 cx_write(VID_A_INT_STAT, 0xffffffff);
579 cx_write(VID_B_INT_STAT, 0xffffffff);
580 cx_write(VID_C_INT_STAT, 0xffffffff);
581 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
582 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
583 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
/* Preferred path: use the per-device sram_channels pointer. */
588 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH01 ], 188*4, 0);
589 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH02 ], 128, 0);
590 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH03 ], 128, 0);
591 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH04 ], 128, 0);
592 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH05 ], 128, 0);
593 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH06 ], 188*4, 0);
594 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH07 ], 128, 0);
595 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH08 ], 128, 0);
596 cx23885_sram_channel_setup(dev, &dev->sram_channels[ SRAM_CH09 ], 128, 0);
599 // FIXME: Put a pointer to the sram_channel table in cx23885_dev
600 // and stop all this ugly switch/if code
601 switch(cx23885_boards[dev->board].bridge) {
602 case CX23885_BRIDGE_885:
603 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH01 ], 188*4, 0);
604 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH02 ], 128, 0);
605 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH03 ], 128, 0);
606 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH04 ], 128, 0);
607 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH05 ], 128, 0);
608 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH06 ], 188*4, 0);
609 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH07 ], 128, 0);
610 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH08 ], 128, 0);
611 cx23885_sram_channel_setup(dev, &cx23885_sram_channels[ SRAM_CH09 ], 128, 0);
613 case CX23885_BRIDGE_887:
614 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH01 ], 188*4, 0);
615 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH02 ], 128, 0);
616 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH03 ], 128, 0);
617 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH04 ], 128, 0);
618 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH05 ], 128, 0);
619 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH06 ], 188*4, 0);
620 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH07 ], 128, 0);
621 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH08 ], 128, 0);
622 cx23885_sram_channel_setup(dev, &cx23887_sram_channels[ SRAM_CH09 ], 128, 0);
/* NOTE(review): break statements for the case arms are not visible here — verify fallthrough behavior in full source. */
625 printk(KERN_ERR "%s() error, default case", __FUNCTION__ );
/* NOTE(review): this is a case arm of a board switch whose enclosing
 * function header is not visible in this view — presumably GPIO setup;
 * confirm against the full source. */
630 case CX23885_BOARD_HAUPPAUGE_HVR1800:
633 /* GPIO-2 8295A Reset */
634 /* GPIO-3-10 cx23417 data0-7 */
635 /* GPIO-11-14 cx23417 addr0-3 */
636 /* GPIO-15-18 cx23417 READY, CS, RD, WR */
638 dprintk( 1, "%s() Configuring HVR1800 GPIO's\n", __FUNCTION__);
639 // FIXME: Analog requires the tuner is brought out of reset
/*
 * Apply board-specific PCI/bridge register workarounds after probe.
 * Currently: clear RDR_TLCTL0 bit 4 on the HVR1800lp.
 */
645 static int cx23885_pci_quirks(struct cx23885_dev *dev)
647 dprintk(1, "%s()\n", __FUNCTION__);
650 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
651 cx_clear(RDR_TLCTL0, 1 << 4);
/*
 * Claim the device's BAR0 MMIO region; logs and fails if it is already
 * owned (released later in cx23885_dev_unregister()).
 */
657 static int get_resources(struct cx23885_dev *dev)
659 if (request_mem_region(pci_resource_start(dev->pci,0),
660 pci_resource_len(dev->pci,0),
664 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
665 dev->name, (unsigned long long)pci_resource_start(dev->pci,0));
/* Forward declarations used by cx23885_dev_setup() below. */
670 static void cx23885_timeout(unsigned long data);
671 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
672 u32 reg, u32 mask, u32 value);
/*
 * Per-board IR (remote control) initialization.
 * Currently a stub: only logs a FIXME for the HVR1800.
 */
674 static int cx23885_ir_init(struct cx23885_dev *dev)
676 dprintk(1, "%s()\n", __FUNCTION__);
678 switch (dev->board) {
679 case CX23885_BOARD_HAUPPAUGE_HVR1800:
680 dprintk(1, "%s() FIXME - Implement IR support\n", __FUNCTION__);
/*
 * One-time device setup at probe: initialize locks/refcount, describe the
 * three I2C masters and the TS2 transport port, claim and map BAR0, apply
 * PCI quirks, detect the board (card= override, then PCI subsystem IDs),
 * select the bridge's SRAM channel table, register I2C buses, run card
 * setup/IR init and register the DVB adapter for TS2.
 */
687 static int cx23885_dev_setup(struct cx23885_dev *dev)
691 mutex_init(&dev->lock);
693 atomic_inc(&dev->refcount);
695 dev->nr = cx23885_devcount++;
696 dev->pci_bus = dev->pci->bus->number;
697 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
698 dev->pci_irqmask = 0x001f00;
700 /* External Master 1 Bus */
701 dev->i2c_bus[0].nr = 0;
702 dev->i2c_bus[0].dev = dev;
703 dev->i2c_bus[0].reg_stat = I2C1_STAT;
704 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
705 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
706 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
707 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
708 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
710 /* External Master 2 Bus */
711 dev->i2c_bus[1].nr = 1;
712 dev->i2c_bus[1].dev = dev;
713 dev->i2c_bus[1].reg_stat = I2C2_STAT;
714 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
715 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
716 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
717 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
718 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
720 /* Internal Master 3 Bus */
721 dev->i2c_bus[2].nr = 2;
722 dev->i2c_bus[2].dev = dev;
723 dev->i2c_bus[2].reg_stat = I2C3_STAT;
724 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
725 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
726 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
727 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
728 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
730 /* Transport bus init dma queue */
731 spin_lock_init(&dev->ts2.slock);
/* TS2 uses SRAM channel 6 and the VID_C register block. */
734 dev->ts2.sram_chno = SRAM_CH06;
735 INIT_LIST_HEAD(&dev->ts2.mpegq.active);
736 INIT_LIST_HEAD(&dev->ts2.mpegq.queued);
737 dev->ts2.mpegq.timeout.function = cx23885_timeout;
738 dev->ts2.mpegq.timeout.data = (unsigned long)&dev->ts2;
739 init_timer(&dev->ts2.mpegq.timeout);
741 dev->ts2.reg_gpcnt = VID_C_GPCNT;
742 dev->ts2.reg_gpcnt_ctl = VID_C_GPCNT_CTL;
743 dev->ts2.reg_dma_ctl = VID_C_DMA_CTL;
744 dev->ts2.reg_lngth = VID_C_LNGTH;
745 dev->ts2.reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
746 dev->ts2.reg_gen_ctrl = VID_C_GEN_CTL;
747 dev->ts2.reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
748 dev->ts2.reg_sop_status = VID_C_SOP_STATUS;
749 dev->ts2.reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
750 dev->ts2.reg_vld_misc = VID_C_VLD_MISC;
751 dev->ts2.reg_ts_clk_en = VID_C_TS_CLK_EN;
752 dev->ts2.reg_ts_int_msk = VID_C_INT_MSK;
754 // FIXME: Make this board specific
755 dev->ts2.pci_irqmask = 0x04; /* TS Port 2 bit */
756 dev->ts2.dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
757 dev->ts2.ts_int_msk_val = 0x1111; /* TS port bits for RISC */
758 dev->ts2.gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
759 dev->ts2.ts_clk_en_val = 0x1; /* Enable TS_CLK */
/* Build the RISC "stopper" program that disables TS2 DMA when jumped to. */
761 cx23885_risc_stopper(dev->pci, &dev->ts2.mpegq.stopper, dev->ts2.reg_dma_ctl, dev->ts2.dma_ctl_val, 0x00);
763 sprintf(dev->name,"cx23885[%d]", dev->nr);
765 if (get_resources(dev) < 0) {
766 printk(KERN_ERR "CORE %s No more PCIe resources for "
767 "subsystem: %04x:%04x\n",
768 dev->name, dev->pci->subsystem_vendor,
769 dev->pci->subsystem_device);
775 mutex_lock(&devlist);
776 list_add_tail(&dev->devlist, &cx23885_devlist);
777 mutex_unlock(&devlist);
/* Map BAR0; bmmio is a byte-addressed alias of the same mapping. */
780 dev->lmmio = ioremap(pci_resource_start(dev->pci,0),
781 pci_resource_len(dev->pci,0));
783 dev->bmmio = (u8 __iomem *)dev->lmmio;
785 cx23885_pci_quirks(dev);
/* Board detection: card= module parameter wins, else match PCI subsystem IDs. */
789 if (card[dev->nr] < cx23885_bcount)
790 dev->board = card[dev->nr];
791 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
792 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
793 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
794 dev->board = cx23885_subids[i].card;
795 if (UNSET == dev->board) {
796 dev->board = CX23885_BOARD_UNKNOWN;
797 cx23885_card_list(dev);
799 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
800 dev->name, dev->pci->subsystem_vendor,
801 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
802 dev->board, card[dev->nr] == dev->board ?
803 "insmod option" : "autodetected");
805 /* Configure the hardware internal memory for fifos */
806 switch(cx23885_boards[dev->board].bridge) {
807 case CX23885_BRIDGE_UNDEFINED:
808 case CX23885_BRIDGE_885:
809 dev->sram_channels = cx23885_sram_channels;
811 case CX23885_BRIDGE_887:
812 dev->sram_channels = cx23887_sram_channels;
815 printk(KERN_ERR "%s() error, default case", __FUNCTION__ );
821 cx23885_i2c_register(&dev->i2c_bus[0]);
822 cx23885_i2c_register(&dev->i2c_bus[1]);
823 cx23885_i2c_register(&dev->i2c_bus[2]);
824 cx23885_call_i2c_clients (&dev->i2c_bus[0], TUNER_SET_STANDBY, NULL);
826 cx23885_card_setup(dev);
827 cx23885_ir_init(dev);
829 if (cx23885_dvb_register(&dev->ts2) < 0) {
830 printk(KERN_ERR "%s() Failed to register dvb adapters\n", __FUNCTION__);
/*
 * Release the BAR0 MMIO region, then — only when the last reference is
 * dropped — unregister the DVB adapter and the three I2C buses.
 */
842 void cx23885_dev_unregister(struct cx23885_dev *dev)
844 release_mem_region(pci_resource_start(dev->pci,0),
843 pci_resource_len(dev->pci,0));
/* Bail out while other holders still reference the device. */
845 if (!atomic_dec_and_test(&dev->refcount))
848 cx23885_dvb_unregister(&dev->ts2);
849 cx23885_i2c_unregister(&dev->i2c_bus[2]);
850 cx23885_i2c_unregister(&dev->i2c_bus[1]);
851 cx23885_i2c_unregister(&dev->i2c_bus[0]);
/*
 * Emit RISC write instructions for one video field: for each of "lines"
 * scanlines of bpl bytes (starting at "offset" into the scatterlist),
 * generate WRITE ops with SOL/EOL markers, splitting a scanline across
 * scatter-gather chunks when necessary. An initial RISC_RESYNC is emitted
 * unless sync_line == NO_SYNC_LINE. Returns the advanced program pointer.
 * Instructions are 3 dwords (opcode, 32-bit DMA address, upper-32 = 0).
 */
856 static u32* cx23885_risc_field(u32 *rp, struct scatterlist *sglist,
857 unsigned int offset, u32 sync_line,
858 unsigned int bpl, unsigned int padding,
861 struct scatterlist *sg;
862 unsigned int line,todo;
864 /* sync instruction */
865 if (sync_line != NO_SYNC_LINE)
866 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
870 for (line = 0; line < lines; line++) {
/* Skip whole SG chunks consumed by the starting offset. */
871 while (offset && offset >= sg_dma_len(sg)) {
872 offset -= sg_dma_len(sg);
875 if (bpl <= sg_dma_len(sg)-offset) {
876 /* fits into current chunk */
877 *(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
878 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
879 *(rp++)=cpu_to_le32(0); /* bits 63-32 */
882 /* scanline needs to be split */
884 *(rp++)=cpu_to_le32(RISC_WRITE|RISC_SOL|
885 (sg_dma_len(sg)-offset));
886 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
887 *(rp++)=cpu_to_le32(0); /* bits 63-32 */
888 todo -= (sg_dma_len(sg)-offset);
/* Middle chunks: full-length writes with neither SOL nor EOL. */
891 while (todo > sg_dma_len(sg)) {
892 *(rp++)=cpu_to_le32(RISC_WRITE|
894 *(rp++)=cpu_to_le32(sg_dma_address(sg));
895 *(rp++)=cpu_to_le32(0); /* bits 63-32 */
896 todo -= sg_dma_len(sg);
/* Final chunk of the split scanline carries EOL. */
899 *(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
900 *(rp++)=cpu_to_le32(sg_dma_address(sg));
901 *(rp++)=cpu_to_le32(0); /* bits 63-32 */
/*
 * Build a complete RISC program for an interlaced video buffer: allocate
 * riscmem sized for the worst case, then emit one field program for the top
 * field (sync line 0) and/or bottom field (sync line 0x200), depending on
 * which offsets are not UNSET. risc->jmp is left pointing at the trailing
 * jump slot for the caller to chain buffers.
 */
910 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
911 struct scatterlist *sglist,
912 unsigned int top_offset, unsigned int bottom_offset,
913 unsigned int bpl, unsigned int padding, unsigned int lines)
915 u32 instructions,fields;
920 if (UNSET != top_offset)
922 if (UNSET != bottom_offset)
925 /* estimate risc mem: worst case is one write per page border +
926 one write per scan line + syncs + jump (all 2 dwords). Padding
927 can cause next bpl to start close to a page border. First DMA
928 region may be smaller than PAGE_SIZE */
929 /* write and jump need and extra dword */
930 instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
932 //if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0)
/* 12 bytes per instruction: 3-dword (64-bit address) encoding. */
933 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
936 /* write risc instructions */
938 if (UNSET != top_offset)
939 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
940 bpl, padding, lines);
941 if (UNSET != bottom_offset)
942 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
943 bpl, padding, lines);
945 /* save pointer to jmp instruction address */
947 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
/*
 * Build a RISC program for a non-interlaced data buffer (e.g. a transport
 * stream): a single field program with no sync and no padding. risc->jmp
 * is left at the trailing jump slot for buffer chaining.
 */
951 int cx23885_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
952 struct scatterlist *sglist, unsigned int bpl,
959 /* estimate risc mem: worst case is one write per page border +
960 one write per scan line + syncs + jump (all 2 dwords). Here
961 there is no padding and no sync. First DMA region may be smaller
963 /* Jump and write need an extra dword */
964 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
967 //if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0)
/* 12 bytes per instruction: 3-dword (64-bit address) encoding. */
968 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*12)) < 0)
971 /* write risc instructions */
973 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
975 /* save pointer to jmp instruction address */
977 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
/*
 * Build the "stopper" RISC program: a masked register write (WRITECR) that
 * stores "value" under "mask" to "reg" (raising IRQ2), followed by a jump
 * back to itself. Chaining a buffer's jump to this program halts DMA by
 * clearing the channel's enable bits.
 */
981 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
982 u32 reg, u32 mask, u32 value)
987 if ((rc = btcx_riscmem_alloc(pci, risc, 4*16)) < 0)
990 /* write risc instructions */
992 //*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2 | RISC_IMM);
993 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
994 *(rp++) = cpu_to_le32(reg);
995 *(rp++) = cpu_to_le32(value);
996 *(rp++) = cpu_to_le32(mask);
/* Self-jump keeps the RISC engine parked here. */
997 *(rp++) = cpu_to_le32(RISC_JUMP);
998 *(rp++) = cpu_to_le32(risc->dma);
999 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
/*
 * Tear down one videobuf buffer: wait for any in-flight I/O, unmap and free
 * its DMA scatterlist, release its RISC program and mark it NEEDS_INIT.
 * Must not be called from interrupt context (videobuf_waiton may sleep).
 */
1003 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1005 BUG_ON(in_interrupt());
1006 videobuf_waiton(&buf->vb,0,0);
1007 videobuf_dma_unmap(q, &buf->vb.dma);
1008 videobuf_dma_free(&buf->vb.dma);
1009 btcx_riscmem_free((struct pci_dev *)q->dev, &buf->risc);
1010 buf->vb.state = STATE_NEEDS_INIT;
/*
 * Start TS DMA for one buffer: program the port's SRAM channel with the
 * buffer's RISC program, configure TS framing/clock registers, reset the
 * hardware packet counter and enable interrupts + DMA. On the 887 bridge
 * the int/DMA enable is skipped to avoid a known NMI-assert bug (see
 * comment near the switch). Heavy register dumps go through dprintk.
 */
1013 static int cx23885_start_dma(struct cx23885_tsport *port,
1014 struct cx23885_dmaqueue *q,
1015 struct cx23885_buffer *buf)
1017 struct cx23885_dev *dev = port->dev;
1019 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __FUNCTION__,
1020 buf->vb.width, buf->vb.height, buf->vb.field);
1023 /* setup fifo + format */
1024 cx23885_sram_channel_setup(dev,
1025 &dev->sram_channels[ port->sram_chno ],
1026 port->ts_packet_size, buf->risc.dma);
1028 cx23885_sram_channel_dump(dev, &dev->sram_channels[ port->sram_chno ] );
1030 // FIXME: Put a pointer to the sram_channel table in cx23885_dev
1031 // and stop all this ugly switch/if code
1032 switch(cx23885_boards[dev->board].bridge) {
1033 case CX23885_BRIDGE_885:
1034 cx23885_sram_channel_setup(dev,
1035 &cx23885_sram_channels[ port->sram_chno ],
1036 port->ts_packet_size, buf->risc.dma);
1038 cx23885_sram_channel_dump(dev, &cx23885_sram_channels[ port->sram_chno ] );
1040 case CX23885_BRIDGE_887:
1041 cx23885_sram_channel_setup(dev,
1042 &cx23887_sram_channels[ port->sram_chno ],
1043 port->ts_packet_size, buf->risc.dma);
1045 cx23885_sram_channel_dump(dev, &cx23887_sram_channels[ port->sram_chno ] );
1048 printk(KERN_ERR "%s() error, default case", __FUNCTION__ );
1053 cx23885_risc_disasm(port, &buf->risc);
1055 /* write TS length to chip */
1056 cx_write(port->reg_lngth, buf->vb.width);
/* Only DVB-capable ports are supported by this path. */
1058 if (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB)) {
1059 printk( "%s() Failed. Unsupported value in .portc (0x%08x)\n", __FUNCTION__,
1060 cx23885_boards[dev->board].portc );
1064 // FIXME: review the need for these two lines
1065 dprintk( 1, "%s() doing .dvb\n", __FUNCTION__);
/* Start-of-packet detection: sync byte 0x47, 188-byte packets. */
1068 cx_write(port->reg_hw_sop_ctrl, 0x47 << 16 | 188 << 4);
1069 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1071 // FIXME: review the need for this
1072 cx_write(GPIO2, 0x00);
1074 switch (dev->board) {
1075 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
1076 case CX23885_BOARD_HAUPPAUGE_HVR1800:
1077 cx_write(port->reg_vld_misc, 0x00);
1078 dprintk(1, "%s() Configuring HVR1800/lp/1500 board\n", __FUNCTION__);
1082 printk(KERN_ERR "%s() error, default case", __FUNCTION__ );
1085 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1088 /* reset counter to zero */
1089 cx_write(port->reg_gpcnt_ctl, 3);
1092 /* A bug in the current 887 implementation, causes an NMI assert during
1093 * starting or stopping interrupts or dma. Avoid the bug for the time being,
1094 * enabling the developer to work on the demod/tuner locking work.
1096 switch(cx23885_boards[dev->board].bridge) {
1097 case CX23885_BRIDGE_885:
1099 dprintk(1, "%s() enabling TS int's and DMA\n", __FUNCTION__ );
1100 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1101 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1102 cx_set(PCI_INT_MSK, dev->pci_irqmask | port->pci_irqmask);
1104 case CX23885_BRIDGE_887:
1106 dprintk(1, "%s() NOT enabling TS int's and DMA, NMI bug\n", __FUNCTION__ );
1109 // FIXME: generate a sensible switch-default message
1110 printk(KERN_ERR "%s() error, default case", __FUNCTION__ );
/* Debug-level register snapshot of all interrupt/DMA control state. */
1113 dprintk(1, "%s() Register Dump\n", __FUNCTION__);
1114 dprintk(1, "%s() set port ts_int_msk, now %x\n", __FUNCTION__, cx_read(port->reg_ts_int_msk) );
1115 dprintk(1, "%s() DEV_CNTRL2 0x%08x\n", __FUNCTION__, cx_read(DEV_CNTRL2) );
1116 dprintk(1, "%s() PCI_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(PCI_INT_MSK) );
1117 dprintk(1, "%s() VID_A_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(VID_A_INT_MSK) );
1118 dprintk(1, "%s() VID_B_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(VID_B_INT_MSK) );
1119 dprintk(1, "%s() VID_C_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(VID_C_INT_MSK) );
1120 dprintk(1, "%s() VID_A_DMA_CTL 0x%08x\n", __FUNCTION__, cx_read(VID_A_DMA_CTL) );
1121 dprintk(1, "%s() VID_B_DMA_CTL 0x%08x\n", __FUNCTION__, cx_read(VID_B_DMA_CTL) );
1122 dprintk(1, "%s() VID_C_DMA_CTL 0x%08x\n", __FUNCTION__, cx_read(VID_C_DMA_CTL) );
1123 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(AUDIO_INT_INT_MSK) );
1124 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08x\n", __FUNCTION__, cx_read(AUD_INT_DMA_CTL) );
1125 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08x\n", __FUNCTION__, cx_read(AUDIO_EXT_INT_MSK) );
1126 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08x\n", __FUNCTION__, cx_read(AUD_EXT_DMA_CTL) );
1128 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1130 dprintk(1, "%s() set dev_cntrl2, now %x\n", __FUNCTION__, cx_read(DEV_CNTRL2) );
1131 dprintk(1, "%s() VID_C_DMA_CTL , now %x\n", __FUNCTION__, cx_read(port->reg_dma_ctl) );
1132 dprintk(1, "%s() VID_C_DMA_CTL , now %x\n", __FUNCTION__, cx_read(VID_C_DMA_CTL) );
1133 dprintk(1, "%s() PAD_CTRL %x\n", __FUNCTION__, cx_read(PAD_CTRL) );
1134 dprintk(1, "%s() GPIO2 %x\n", __FUNCTION__, cx_read(GPIO2) );
1135 dprintk(1, "%s() VID_C_LN_LNGTH , now %x\n", __FUNCTION__, cx_read(port->reg_lngth) );
1136 dprintk(1, "%s() VID_C_HW_SOP_CTL, now %x\n", __FUNCTION__, cx_read(port->reg_hw_sop_ctrl) );
1137 dprintk(1, "%s() VID_C_GEN_CTL , now %x\n", __FUNCTION__, cx_read(port->reg_gen_ctrl) );
1138 dprintk(1, "%s() VID_C_SOP_STATUS, now %x\n", __FUNCTION__, cx_read(VID_C_SOP_STATUS) );
1139 dprintk(1, "%s() VID_C_TS_CLK_EN , now %x\n", __FUNCTION__, cx_read(VID_C_TS_CLK_EN) );
1140 dprintk(1, "%s() VID_C_FIFO_OVLST, now %x\n", __FUNCTION__, cx_read(VID_C_FIFO_OVFL_STAT) );
1141 dprintk(1, "%s() VID_C_INT_MSTAT , now 0x%08x\n", __FUNCTION__, cx_read(VID_C_INT_MSTAT) );
/*
 * Stop TS DMA on a port: mask the port's TS interrupt bits and clear its
 * DMA control enable bits.
 */
1145 static int cx23885_stop_dma(struct cx23885_tsport *port)
1147 struct cx23885_dev *dev = port->dev;
1148 dprintk(1, "%s()\n", __FUNCTION__);
1150 /* Stop interrupts and DMA */
1151 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1152 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
/*
 * Restart a port's DMA queue (e.g. after a timeout). If the active list is
 * empty, pull buffers from the queued list: the first becomes active and
 * starts DMA; matching follow-ons (same width/height/format) are chained
 * onto the previous buffer's RISC jump. If buffers were already active,
 * DMA is simply restarted from the head and counts renumbered.
 */
1157 static int cx23885_restart_queue(struct cx23885_tsport *port,
1158 struct cx23885_dmaqueue *q)
1160 struct cx23885_dev *dev = port->dev;
1161 struct cx23885_buffer *buf;
1162 struct list_head *item;
1164 dprintk(5, "%s()\n", __FUNCTION__);
1165 if (list_empty(&q->active))
1167 struct cx23885_buffer *prev;
1170 dprintk(5, "%s() queue is empty\n", __FUNCTION__);
1173 if (list_empty(&q->queued))
1175 buf = list_entry(q->queued.next, struct cx23885_buffer, vb.queue);
/* First buffer from the queued list: make it active and kick DMA. */
1177 list_del(&buf->vb.queue);
1178 list_add_tail(&buf->vb.queue,&q->active);
1179 cx23885_start_dma(port, q, buf);
1180 buf->vb.state = STATE_ACTIVE;
1181 buf->count = q->count++;
1182 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1183 dprintk(5,"[%p/%d] restart_queue - first active\n",
/* Compatible follow-on buffer: chain it via the previous RISC jump. */
1186 } else if (prev->vb.width == buf->vb.width &&
1187 prev->vb.height == buf->vb.height &&
1188 prev->fmt == buf->fmt) {
1189 list_del(&buf->vb.queue);
1190 list_add_tail(&buf->vb.queue,&q->active);
1191 buf->vb.state = STATE_ACTIVE;
1192 buf->count = q->count++;
1193 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1194 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1195 dprintk(5,"[%p/%d] restart_queue - move to active\n",
/* Active list non-empty: restart DMA at the head and renumber all counts. */
1205 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1206 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
1208 cx23885_start_dma(port, q, buf);
1209 list_for_each(item,&q->active) {
1210 buf = list_entry(item, struct cx23885_buffer, vb.queue);
1211 buf->count = q->count++;
1213 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1217 /* ------------------------------------------------------------------ */
/*
 * videobuf buf_prepare hook: size-check the userspace buffer, and for
 * uninitialized buffers set geometry (width = packet size, height = packet
 * count), lock pages for DMA and build the RISC data-buffer program.
 * On any failure the buffer is freed and the error returned.
 */
1219 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1220 struct cx23885_buffer *buf, enum v4l2_field field)
1222 struct cx23885_dev *dev = port->dev;
1223 int size = port->ts_packet_size * port->ts_packet_count;
1226 dprintk(1, "%s: %p\n", __FUNCTION__, buf);
/* Reject a userspace buffer that is too small for the full TS payload. */
1227 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1230 if (STATE_NEEDS_INIT == buf->vb.state) {
1231 buf->vb.width = port->ts_packet_size;
1232 buf->vb.height = port->ts_packet_count;
1233 buf->vb.size = size;
1234 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1236 if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL)))
1238 cx23885_risc_databuffer(dev->pci, &buf->risc,
1240 buf->vb.width, buf->vb.height);
1242 buf->vb.state = STATE_PREPARED;
/* Error path: release everything acquired above. */
1246 cx23885_free_buffer(q,buf);
/*
 * cx23885_buf_queue() - videobuf "queue" hook: append a prepared buffer
 * to the port's active MPEG queue.
 *
 * The buffer's trailing RISC jump is first pointed at the queue's
 * stopper program (with IRQ + counter increment), so DMA halts cleanly
 * if no further buffer arrives.  If the active list is empty the buffer
 * starts DMA itself; otherwise it is chained in by rewriting the
 * previous tail buffer's jump address to this buffer's DMA program.
 */
1250 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1252 struct cx23885_buffer *prev;
1253 struct cx23885_dev *dev = port->dev;
1254 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1256 /* add jump to stopper */
1257 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1258 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1259 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
/* First buffer on an idle queue: kick off the DMA engine and arm the
 * stall-watchdog timer. */
1261 if (list_empty(&cx88q->active)) {
1262 dprintk( 1, "queue is empty - first active\n" );
1263 list_add_tail(&buf->vb.queue,&cx88q->active);
1264 cx23885_start_dma(port, cx88q, buf);
1265 buf->vb.state = STATE_ACTIVE;
1266 buf->count = cx88q->count++;
1267 mod_timer(&cx88q->timeout, jiffies+BUFFER_TIMEOUT);
1268 dprintk(1,"[%p/%d] %s - first active\n",
1269 buf, buf->vb.i, __FUNCTION__);
/* Queue already running: splice this buffer in after the current tail
 * by redirecting the tail's RISC jump from the stopper to us. */
1272 dprintk( 1, "queue is not empty - append to active\n" );
1273 prev = list_entry(cx88q->active.prev, struct cx23885_buffer, vb.queue);
1274 list_add_tail(&buf->vb.queue,&cx88q->active);
1275 buf->vb.state = STATE_ACTIVE;
1276 buf->count = cx88q->count++;
1277 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1278 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1279 dprintk( 1, "[%p/%d] %s - append to active\n",
1280 buf, buf->vb.i, __FUNCTION__);
1284 /* ----------------------------------------------------------- */
/*
 * do_cancel_buffers() - fail every buffer on the port's active queue.
 *
 * Under port->slock (irqsave), each active buffer is unlinked, marked
 * STATE_ERROR and its waiters woken, with @reason used only for the
 * debug message.  When @restart is non-zero the queue is re-armed via
 * cx23885_restart_queue() afterwards (timeout recovery path); the
 * cancel path passes restart=0.
 */
1286 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason, int restart)
1288 struct cx23885_dev *dev = port->dev;
1289 struct cx23885_dmaqueue *q = &port->mpegq;
1290 struct cx23885_buffer *buf;
1291 unsigned long flags;
1293 spin_lock_irqsave(&port->slock,flags);
1294 while (!list_empty(&q->active)) {
1295 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1296 list_del(&buf->vb.queue);
/* Mark as failed and wake anyone sleeping on this buffer. */
1297 buf->vb.state = STATE_ERROR;
1298 wake_up(&buf->vb.done);
1299 dprintk(1,"[%p/%d] %s - dma=0x%08lx\n",
1300 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1304 dprintk(1, "restarting queue\n" );
1305 cx23885_restart_queue(port, q);
1307 spin_unlock_irqrestore(&port->slock,flags);
/*
 * cx23885_cancel_buffers() - stop streaming on a TS port.
 *
 * Kills the stall-watchdog timer (del_timer_sync, so it may not be
 * called from the timer itself), stops DMA and interrupts for the
 * port, then errors out all active buffers without restarting.
 */
1310 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1312 struct cx23885_dev *dev = port->dev;
1313 struct cx23885_dmaqueue *q = &port->mpegq;
1315 dprintk(1, "%s()\n", __FUNCTION__ );
1316 del_timer_sync(&q->timeout);
1317 cx23885_stop_dma(port);
1318 do_cancel_buffers(port, "cancel", 0);
/*
 * cx23885_timeout() - mpegq watchdog timer callback.
 *
 * Fires when no buffer completed within BUFFER_TIMEOUT: dumps the
 * port's SRAM channel state for diagnosis, stops DMA, then errors out
 * the active buffers and restarts the queue (restart=1).
 */
1321 static void cx23885_timeout(unsigned long data)
1323 struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1324 struct cx23885_dev *dev = port->dev;
1326 dprintk(1, "%s()\n",__FUNCTION__);
1330 cx23885_sram_channel_dump(dev, &dev->sram_channels[ port->sram_chno ]);
1333 // FIXME: Put a pointer to the sram_channel table in cx23885_dev
1334 // and stop all this ugly switch/if code
/* Pick the SRAM channel table matching the bridge variant (885 vs 887). */
1335 if(cx23885_boards[dev->board].bridge == CX23885_BRIDGE_885)
1336 cx23885_sram_channel_dump(dev, &cx23885_sram_channels[ port->sram_chno ]);
1337 if(cx23885_boards[dev->board].bridge == CX23885_BRIDGE_887)
1338 cx23885_sram_channel_dump(dev, &cx23887_sram_channels[ port->sram_chno ]);
1341 cx23885_stop_dma(port);
1342 do_cancel_buffers(port, "timeout", 1);
/* Bit masks for the bridge-level PCI interrupt status/mask registers
 * (PCI_INT_STAT / PCI_INT_MSK), as tested in cx23885_irq(). */
1345 #define PCI_MSK_APB_DMA (1 << 12)
1346 #define PCI_MSK_AL_WR (1 << 11)
1347 #define PCI_MSK_AL_RD (1 << 10)
1348 #define PCI_MSK_RISC_WR (1 << 9)
1349 #define PCI_MSK_RISC_RD (1 << 8)
1351 #define PCI_MSK_AUD_EXT (1 << 4)
1352 #define PCI_MSK_AUD_INT (1 << 3)
1353 #define PCI_MSK_VID_C (1 << 2)
1354 #define PCI_MSK_VID_B (1 << 1)
1355 #define PCI_MSK_VID_A 1
/* Bit masks for the VID C (TS port) interrupt status/mask registers
 * (VID_C_INT_STAT / VID_C_INT_MSK): error conditions plus the two
 * RISC interrupt sources used for buffer completion / queue restart. */
1357 #define VID_C_MSK_BAD_PKT (1 << 20)
1358 #define VID_C_MSK_OPC_ERR (1 << 16)
1359 #define VID_C_MSK_SYNC (1 << 12)
1360 #define VID_C_MSK_OF (1 << 8)
1361 #define VID_C_MSK_RISCI2 (1 << 4)
1362 #define VID_C_MSK_RISCI1 1
/*
 * cx23885_irq() - shared interrupt handler for the bridge.
 *
 * Reads the bridge-level (PCI_INT_STAT/MSK) and TS-port-C
 * (VID_C_INT_STAT/MSK) status registers, logs any asserted error
 * bits, and services the two RISC interrupts: RISCI1 completes
 * buffers (cx23885_wakeup), RISCI2 restarts the queue.  Both status
 * registers are written back at the end to acknowledge the sources.
 * Returns IRQ_RETVAL(handled).
 *
 * NOTE(review): only ts2 (VID C) is serviced here; ports A/B status
 * bits are merely logged.
 */
1364 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1366 struct cx23885_dev *dev = dev_id;
1367 struct cx23885_tsport *port = &dev->ts2;
1368 u32 pci_status, pci_mask;
1369 u32 ts2_status, ts2_mask;
1370 int count = 0, handled = 0;
1372 pci_status = cx_read(PCI_INT_STAT);
1373 pci_mask = cx_read(PCI_INT_MSK);
1375 ts2_status = cx_read(VID_C_INT_STAT);
1376 ts2_mask = cx_read(VID_C_INT_MSK);
/* Nothing pending from either source: not our interrupt (shared line). */
1378 if ( (pci_status == 0) && (ts2_status == 0) )
1381 count = cx_read(port->reg_gpcnt);
1382 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n", pci_status, pci_mask );
1383 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n", ts2_status, ts2_mask, count );
/* Bridge-level error/engine bits: decode each one for the debug log. */
1385 if ( (pci_status & PCI_MSK_RISC_RD) ||
1386 (pci_status & PCI_MSK_RISC_WR) ||
1387 (pci_status & PCI_MSK_AL_RD) ||
1388 (pci_status & PCI_MSK_AL_WR) ||
1389 (pci_status & PCI_MSK_APB_DMA) ||
1390 (pci_status & PCI_MSK_VID_C) ||
1391 (pci_status & PCI_MSK_VID_B) ||
1392 (pci_status & PCI_MSK_VID_A) ||
1393 (pci_status & PCI_MSK_AUD_INT) ||
1394 (pci_status & PCI_MSK_AUD_EXT) )
1397 if (pci_status & PCI_MSK_RISC_RD)
1398 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n", PCI_MSK_RISC_RD);
1399 if (pci_status & PCI_MSK_RISC_WR)
1400 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n", PCI_MSK_RISC_WR);
1401 if (pci_status & PCI_MSK_AL_RD)
1402 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n", PCI_MSK_AL_RD);
1403 if (pci_status & PCI_MSK_AL_WR)
1404 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n", PCI_MSK_AL_WR);
1405 if (pci_status & PCI_MSK_APB_DMA)
1406 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n", PCI_MSK_APB_DMA);
1407 if (pci_status & PCI_MSK_VID_C)
1408 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n", PCI_MSK_VID_C);
1409 if (pci_status & PCI_MSK_VID_B)
1410 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n", PCI_MSK_VID_B);
1411 if (pci_status & PCI_MSK_VID_A)
1412 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n", PCI_MSK_VID_A);
1413 if (pci_status & PCI_MSK_AUD_INT)
1414 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n", PCI_MSK_AUD_INT);
1415 if (pci_status & PCI_MSK_AUD_EXT)
1416 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n", PCI_MSK_AUD_EXT);
/* TS port C error conditions: log, stop DMA and dump SRAM state. */
1420 if ( (ts2_status & VID_C_MSK_OPC_ERR) ||
1421 (ts2_status & VID_C_MSK_BAD_PKT) ||
1422 (ts2_status & VID_C_MSK_SYNC) ||
1423 (ts2_status & VID_C_MSK_OF))
1425 if (ts2_status & VID_C_MSK_OPC_ERR)
1426 dprintk(7, " (VID_C_MSK_OPC_ERR 0x%08x)\n", VID_C_MSK_OPC_ERR);
1427 if (ts2_status & VID_C_MSK_BAD_PKT)
1428 dprintk(7, " (VID_C_MSK_BAD_PKT 0x%08x)\n", VID_C_MSK_BAD_PKT);
1429 if (ts2_status & VID_C_MSK_SYNC)
1430 dprintk(7, " (VID_C_MSK_SYNC 0x%08x)\n", VID_C_MSK_SYNC);
1431 if (ts2_status & VID_C_MSK_OF)
1432 dprintk(7, " (VID_C_MSK_OF 0x%08x)\n", VID_C_MSK_OF);
1434 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
/* Halt the port's DMA on a RISC program error before dumping state. */
1436 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1438 cx23885_sram_channel_dump(dev, &dev->sram_channels[ port->sram_chno ]);
1440 cx23885_sram_channel_dump(dev, &cx23885_sram_channels[ port->sram_chno ]);
/* RISCI1: a buffer finished — complete it under the port lock. */
1444 } else if (ts2_status & VID_C_MSK_RISCI1) {
1446 dprintk(7, " (RISCI1 0x%08x)\n", VID_C_MSK_RISCI1);
1448 spin_lock(&port->slock);
1449 count = cx_read(port->reg_gpcnt);
1450 cx23885_wakeup(port, &port->mpegq, count);
1451 spin_unlock(&port->slock);
/* RISCI2: the stopper program ran — feed queued buffers back in. */
1453 } else if (ts2_status & VID_C_MSK_RISCI2) {
1455 dprintk(7, " (RISCI2 0x%08x)\n", VID_C_MSK_RISCI2);
1457 spin_lock(&port->slock);
1458 cx23885_restart_queue(port, &port->mpegq);
1459 spin_unlock(&port->slock);
/* Write-one-to-clear acknowledge of everything we observed. */
1463 cx_write(VID_C_INT_STAT, ts2_status);
1464 cx_write(PCI_INT_STAT, pci_status);
1467 return IRQ_RETVAL(handled);
/*
 * cx23885_initdev() - PCI probe callback.
 *
 * Allocates the per-device state, enables the PCI device, runs
 * cx23885_dev_setup(), prints the PCI revision/latency/BAR info,
 * enables bus mastering, verifies 32-bit DMA support, requests the
 * (shared) IRQ line and stores the device in drvdata.  On the visible
 * failure path the device is torn down via cx23885_dev_unregister().
 */
1470 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
1471 const struct pci_device_id *pci_id)
1473 struct cx23885_dev *dev;
1476 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
1482 if (pci_enable_device(pci_dev)) {
1487 if (cx23885_dev_setup(dev) < 0) {
1492 /* print pci info */
1493 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
1494 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
1495 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1496 "latency: %d, mmio: 0x%llx\n", dev->name,
1497 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1498 dev->pci_lat, (unsigned long long)pci_resource_start(pci_dev,0));
1500 pci_set_master(pci_dev);
/* The bridge can only address 32-bit DMA; bail out otherwise. */
1501 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1502 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
/* IRQF_SHARED: the handler tolerates interrupts meant for others. */
1507 err = request_irq(pci_dev->irq, cx23885_irq
1508 , IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
1510 printk(KERN_ERR "%s: can't get IRQ %d\n",
1511 dev->name, pci_dev->irq);
1515 pci_set_drvdata(pci_dev, dev);
/* error path: undo cx23885_dev_setup() */
1519 cx23885_dev_unregister(dev);
/*
 * cx23885_finidev() - PCI remove callback.
 *
 * Mirrors cx23885_initdev(): shuts the hardware down, disables the
 * PCI device, releases the IRQ, clears drvdata, unlinks the device
 * from the global list under the devlist mutex, and unregisters the
 * remaining device state.
 */
1525 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
1527 struct cx23885_dev *dev = pci_get_drvdata(pci_dev);
1529 cx23885_shutdown(dev);
1531 pci_disable_device(pci_dev);
1533 /* unregister stuff */
1534 free_irq(pci_dev->irq, dev);
1535 pci_set_drvdata(pci_dev, NULL);
1537 mutex_lock(&devlist);
1538 list_del(&dev->devlist);
1539 mutex_unlock(&devlist);
1541 cx23885_dev_unregister(dev);
/* PCI IDs this driver binds to; any subsystem vendor/device is
 * accepted — board identification happens later via the card table.
 * NOTE(review): the .vendor/.device initializers for each entry are
 * not visible in this hunk. */
1545 static struct pci_device_id cx23885_pci_tbl[] = {
1550 .subvendor = PCI_ANY_ID,
1551 .subdevice = PCI_ANY_ID,
1556 .subvendor = PCI_ANY_ID,
1557 .subdevice = PCI_ANY_ID,
1559 /* --- end of list --- */
/* PCI driver glue: probe/remove entry points and the ID table above. */
1564 static struct pci_driver cx23885_pci_driver = {
1566 .id_table = cx23885_pci_tbl,
1567 .probe = cx23885_initdev,
1568 .remove = __devexit_p(cx23885_finidev),
/*
 * cx23885_init() - module entry point: print version/snapshot banner
 * (version packed as major.minor.patch in CX88_VERSION_CODE, snapshot
 * date packed as YYYYMMDD in SNAPSHOT) and register the PCI driver.
 */
1574 static int cx23885_init(void)
1576 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
1577 (CX88_VERSION_CODE >> 16) & 0xff,
1578 (CX88_VERSION_CODE >> 8) & 0xff,
1579 CX88_VERSION_CODE & 0xff);
1581 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
1582 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1584 return pci_register_driver(&cx23885_pci_driver);
/* cx23885_fini() - module exit point: unregister the PCI driver. */
1587 static void cx23885_fini(void)
1589 pci_unregister_driver(&cx23885_pci_driver);
1592 module_init(cx23885_init);
1593 module_exit(cx23885_fini);
1595 /* ----------------------------------------------------------- */
1600 * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off