ieee1394: ohci1394: remove dead CONFIG variable
[safe/jmp/linux-2.6] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI as well)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/kernel.h>
86 #include <linux/list.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/wait.h>
90 #include <linux/errno.h>
91 #include <linux/module.h>
92 #include <linux/moduleparam.h>
93 #include <linux/pci.h>
94 #include <linux/fs.h>
95 #include <linux/poll.h>
96 #include <asm/byteorder.h>
97 #include <asm/atomic.h>
98 #include <asm/uaccess.h>
99 #include <linux/delay.h>
100 #include <linux/spinlock.h>
101
102 #include <asm/pgtable.h>
103 #include <asm/page.h>
104 #include <asm/irq.h>
105 #include <linux/types.h>
106 #include <linux/vmalloc.h>
107 #include <linux/init.h>
108
109 #ifdef CONFIG_PPC_PMAC
110 #include <asm/machdep.h>
111 #include <asm/pmac_feature.h>
112 #include <asm/prom.h>
113 #include <asm/pci-bridge.h>
114 #endif
115
116 #include "csr1212.h"
117 #include "ieee1394.h"
118 #include "ieee1394_types.h"
119 #include "hosts.h"
120 #include "dma.h"
121 #include "iso.h"
122 #include "ieee1394_core.h"
123 #include "highlevel.h"
124 #include "ohci1394.h"
125
126 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
127 #define OHCI1394_DEBUG
128 #endif
129
130 #ifdef DBGMSG
131 #undef DBGMSG
132 #endif
133
134 #ifdef OHCI1394_DEBUG
135 #define DBGMSG(fmt, args...) \
136 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
137 #else
138 #define DBGMSG(fmt, args...) do {} while (0)
139 #endif
140
141 /* print general (card independent) information */
142 #define PRINT_G(level, fmt, args...) \
143 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
144
145 /* print card specific information */
146 #define PRINT(level, fmt, args...) \
147 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
148
149 /* Module Parameters */
150 static int phys_dma = 1;
151 module_param(phys_dma, int, 0444);
152 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
153
154 static void dma_trm_tasklet(unsigned long data);
155 static void dma_trm_reset(struct dma_trm_ctx *d);
156
157 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
158                              enum context_type type, int ctx, int num_desc,
159                              int buf_size, int split_buf_size, int context_base);
160 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
161 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
162
163 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
164                              enum context_type type, int ctx, int num_desc,
165                              int context_base);
166
167 static void ohci1394_pci_remove(struct pci_dev *pdev);
168
169 #ifndef __LITTLE_ENDIAN
170 static const size_t hdr_sizes[] = {
171         3,      /* TCODE_WRITEQ */
172         4,      /* TCODE_WRITEB */
173         3,      /* TCODE_WRITE_RESPONSE */
174         0,      /* reserved */
175         3,      /* TCODE_READQ */
176         4,      /* TCODE_READB */
177         3,      /* TCODE_READQ_RESPONSE */
178         4,      /* TCODE_READB_RESPONSE */
179         1,      /* TCODE_CYCLE_START */
180         4,      /* TCODE_LOCK_REQUEST */
181         2,      /* TCODE_ISO_DATA */
182         4,      /* TCODE_LOCK_RESPONSE */
183                 /* rest is reserved or link-internal */
184 };
185
186 static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
187 {
188         size_t size;
189
190         if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
191                 return;
192
193         size = hdr_sizes[tcode];
194         while (size--)
195                 data[size] = le32_to_cpu(data[size]);
196 }
197 #else
198 #define header_le32_to_cpu(w,x) do {} while (0)
199 #endif /* !LITTLE_ENDIAN */
200
201 /***********************************
202  * IEEE-1394 functionality section *
203  ***********************************/
204
205 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
206 {
207         int i;
208         unsigned long flags;
209         quadlet_t r;
210
211         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
212
213         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
214
215         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
216                 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
217                         break;
218
219                 mdelay(1);
220         }
221
222         r = reg_read(ohci, OHCI1394_PhyControl);
223
224         if (i >= OHCI_LOOP_COUNT)
225                 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
226                        r, r & 0x80000000, i);
227
228         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
229
230         return (r & 0x00ff0000) >> 16;
231 }
232
233 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
234 {
235         int i;
236         unsigned long flags;
237         u32 r = 0;
238
239         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
240
241         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
242
243         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
244                 r = reg_read(ohci, OHCI1394_PhyControl);
245                 if (!(r & 0x00004000))
246                         break;
247
248                 mdelay(1);
249         }
250
251         if (i == OHCI_LOOP_COUNT)
252                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
253                        r, r & 0x00004000, i);
254
255         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
256
257         return;
258 }
259
260 /* Or's our value into the current value */
261 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
262 {
263         u8 old;
264
265         old = get_phy_reg (ohci, addr);
266         old |= data;
267         set_phy_reg (ohci, addr, old);
268
269         return;
270 }
271
/* Parse the self-ID buffer after a bus reset and feed each valid self-ID
 * packet to the ieee1394 core via hpsb_selfid_received().
 *
 * On a reception error (error bit set, or generation count mismatch
 * between SelfIDCount and the buffer header) a new bus reset is forced,
 * up to OHCI1394_MAX_SELF_ID_ERRORS times.
 *
 * NOTE(review): @isroot is not used in this body — confirm against callers
 * whether it is vestigial. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	/* selfid_swap indicates the buffer was DMAed little-endian and
	 * needs swapping on this host */
	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount = selfIDError; bits 23:16 = generation,
	 * which must match the generation stored in the buffer header */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* PHY reg 1 bit 0x40 = IBR (initiate bus reset) */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Quadlet count from SelfIDCount bits 12:2, minus the header
	 * quadlet.
	 * NOTE(review): size is size_t and the loop below decrements by 2;
	 * an odd or zero quadlet count would underflow and loop over the
	 * whole buffer — presumably the hardware always delivers the header
	 * plus packet/inverse pairs, but verify. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet is a quadlet followed by its bit inverse */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		/* Valid only if the second quadlet is the inverse of the first */
		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			/* Core expects big-endian self-ID quadlets */
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* Bits 29:24 of a self-ID packet hold the phy ID */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
337
338 static void ohci_soft_reset(struct ti_ohci *ohci) {
339         int i;
340
341         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
342
343         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
344                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
345                         break;
346                 mdelay(1);
347         }
348         DBGMSG ("Soft reset finished");
349 }
350
351
352 /* Generate the dma receive prgs and start the context */
/* Generate the dma receive prgs and start the context.
 *
 * Builds a circular INPUT_MORE descriptor chain over the context's
 * pre-allocated buffers (one descriptor per buffer, last one branching
 * back to the first), resets the software read position, programs the
 * iso-specific registers when this is an IR context, and finally points
 * the hardware at the chain and sets the RUN bit.
 * @generate_irq: if non-zero, every descriptor raises an interrupt on
 * completion. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the hardware context is stopped before rebuilding it */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		/* Descriptors are little-endian as seen by the controller */
		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* Low bit = Z value (1 descriptor at branch target) */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor wraps to the first with Z=0, so the
			 * context stalls until software restarts it */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* resCount starts equal to reqCount (buffer empty) */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Reset software consumer position */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
411
412 /* Initialize the dma transmit context */
413 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
414 {
415         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
416
417         /* Stop the context */
418         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
419
420         d->prg_ind = 0;
421         d->sent_ind = 0;
422         d->free_prgs = d->num_desc;
423         d->branchAddrPtr = NULL;
424         INIT_LIST_HEAD(&d->fifo_list);
425         INIT_LIST_HEAD(&d->pending_list);
426
427         if (d->type == DMA_CTX_ISO) {
428                 /* enable interrupts */
429                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
430         }
431
432         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
433 }
434
435 /* Count the number of available iso contexts */
436 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
437 {
438         int i,ctx=0;
439         u32 tmp;
440
441         reg_write(ohci, reg, 0xffffffff);
442         tmp = reg_read(ohci, reg);
443
444         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
445
446         /* Count the number of contexts */
447         for (i=0; i<32; i++) {
448                 if (tmp & 1) ctx++;
449                 tmp >>= 1;
450         }
451         return ctx;
452 }
453
454 /* Global initialization */
/* Global initialization: bring the controller from reset to a fully
 * running state — bus options, PHY configuration, self-ID buffer, config
 * ROM, DMA contexts, interrupt unmasking and finally link enable.
 * The register-write sequence below is order-sensitive; do not reorder. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number (0x3ff) with node number 0 */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec+1) bytes, from the
	 * max_rec field in BusOptions bits 15:12 */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* Log the controller revision (BCD in the Version register) and
	 * basic capabilities */
	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
	      "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY reg 7 selects the port; reg 8 reflects its status */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* If the port is connected (0x20), clear its disable bit */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM read to start */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* Dump the first 32 EEPROM bytes for bug reports */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
625
626 /*
627  * Insert a packet in the DMA fifo and generate the DMA prg
628  * FIXME: rewrite the program in order to accept packets crossing
629  *        page boundaries.
630  *        check also that a single dma descriptor doesn't cross a
631  *        page boundary.
632  */
/* Insert a packet in the DMA fifo and generate the DMA prg.
 *
 * Builds the OUTPUT_MORE/OUTPUT_LAST descriptor program for @packet in
 * the next free slot of transmit context @d, chains it onto the previous
 * program via *branchAddrPtr, and queues the packet on d->fifo_list.
 * Caller must hold d->lock and guarantee d->free_prgs > 0 (this function
 * unconditionally consumes one program slot).
 *
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		/* timeStamp = (cycleSeconds+1) mod 8 in bits 15:13,
		 * current cycleCount in bits 12:0 */
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* PHY packet: synthesized tcode quadlet plus the
			 * two raw header quadlets, forced little-endian */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First quadlet: speed code in 18:16, low half of
			 * header[0] (tlabel/rt/tcode) in 15:0 */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* no-op on little-endian hosts */
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* Immediate header: 8 bytes for stream packets,
			 * 16 bytes for normal async headers */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* NOTE(review): pci_map_single() return value is not
			 * checked for mapping errors here */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Chain onto the previous program (Z=3: three
			 * descriptors in this program), then remember where
			 * the next program must patch its branch in */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* PHY packets carry a trailing ack quadlet, hence +4 */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Z=2: two descriptors in this program */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
802
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* first program slot filled in this pass */
	int z = 0;		/* Z for the first descriptor block; 0 = nothing queued yet */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: Z=3 when the packet carries a
		 * data payload, Z=2 for a header-only packet */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {	/* 0x8000 = run bit */
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* point the context at the first descriptor block (low bits = Z) */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* set run */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* set wake */
	}

	return;
}
857
858 /* Transmission of an async or iso packet */
859 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
860 {
861         struct ti_ohci *ohci = host->hostdata;
862         struct dma_trm_ctx *d;
863         unsigned long flags;
864
865         if (packet->data_size > ohci->max_packet_size) {
866                 PRINT(KERN_ERR,
867                       "Transmit packet size %Zd is too big",
868                       packet->data_size);
869                 return -EOVERFLOW;
870         }
871
872         /* Decide whether we have an iso, a request, or a response packet */
873         if (packet->type == hpsb_raw)
874                 d = &ohci->at_req_context;
875         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
876                 /* The legacy IT DMA context is initialized on first
877                  * use.  However, the alloc cannot be run from
878                  * interrupt context, so we bail out if that is the
879                  * case. I don't see anyone sending ISO packets from
880                  * interrupt context anyway... */
881
882                 if (ohci->it_legacy_context.ohci == NULL) {
883                         if (in_interrupt()) {
884                                 PRINT(KERN_ERR,
885                                       "legacy IT context cannot be initialized during interrupt");
886                                 return -EINVAL;
887                         }
888
889                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
890                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
891                                               OHCI1394_IsoXmitContextBase) < 0) {
892                                 PRINT(KERN_ERR,
893                                       "error initializing legacy IT context");
894                                 return -ENOMEM;
895                         }
896
897                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
898                 }
899
900                 d = &ohci->it_legacy_context;
901         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
902                 d = &ohci->at_resp_context;
903         else
904                 d = &ohci->at_req_context;
905
906         spin_lock_irqsave(&d->lock,flags);
907
908         list_add_tail(&packet->driver_list, &d->pending_list);
909
910         dma_trm_flush(ohci, d);
911
912         spin_unlock_irqrestore(&d->lock,flags);
913
914         return 0;
915 }
916
917 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
918 {
919         struct ti_ohci *ohci = host->hostdata;
920         int retval = 0;
921         unsigned long flags;
922         int phy_reg;
923
924         switch (cmd) {
925         case RESET_BUS:
926                 switch (arg) {
927                 case SHORT_RESET:
928                         phy_reg = get_phy_reg(ohci, 5);
929                         phy_reg |= 0x40;
930                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
931                         break;
932                 case LONG_RESET:
933                         phy_reg = get_phy_reg(ohci, 1);
934                         phy_reg |= 0x40;
935                         set_phy_reg(ohci, 1, phy_reg); /* set IBR */
936                         break;
937                 case SHORT_RESET_NO_FORCE_ROOT:
938                         phy_reg = get_phy_reg(ohci, 1);
939                         if (phy_reg & 0x80) {
940                                 phy_reg &= ~0x80;
941                                 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
942                         }
943
944                         phy_reg = get_phy_reg(ohci, 5);
945                         phy_reg |= 0x40;
946                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
947                         break;
948                 case LONG_RESET_NO_FORCE_ROOT:
949                         phy_reg = get_phy_reg(ohci, 1);
950                         phy_reg &= ~0x80;
951                         phy_reg |= 0x40;
952                         set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
953                         break;
954                 case SHORT_RESET_FORCE_ROOT:
955                         phy_reg = get_phy_reg(ohci, 1);
956                         if (!(phy_reg & 0x80)) {
957                                 phy_reg |= 0x80;
958                                 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
959                         }
960
961                         phy_reg = get_phy_reg(ohci, 5);
962                         phy_reg |= 0x40;
963                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
964                         break;
965                 case LONG_RESET_FORCE_ROOT:
966                         phy_reg = get_phy_reg(ohci, 1);
967                         phy_reg |= 0xc0;
968                         set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
969                         break;
970                 default:
971                         retval = -1;
972                 }
973                 break;
974
975         case GET_CYCLE_COUNTER:
976                 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
977                 break;
978
979         case SET_CYCLE_COUNTER:
980                 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
981                 break;
982
983         case SET_BUS_ID:
984                 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
985                 break;
986
987         case ACT_CYCLE_MASTER:
988                 if (arg) {
989                         /* check if we are root and other nodes are present */
990                         u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
991                         if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
992                                 /*
993                                  * enable cycleTimer, cycleMaster
994                                  */
995                                 DBGMSG("Cycle master enabled");
996                                 reg_write(ohci, OHCI1394_LinkControlSet,
997                                           OHCI1394_LinkControl_CycleTimerEnable |
998                                           OHCI1394_LinkControl_CycleMaster);
999                         }
1000                 } else {
1001                         /* disable cycleTimer, cycleMaster, cycleSource */
1002                         reg_write(ohci, OHCI1394_LinkControlClear,
1003                                   OHCI1394_LinkControl_CycleTimerEnable |
1004                                   OHCI1394_LinkControl_CycleMaster |
1005                                   OHCI1394_LinkControl_CycleSource);
1006                 }
1007                 break;
1008
1009         case CANCEL_REQUESTS:
1010                 DBGMSG("Cancel request received");
1011                 dma_trm_reset(&ohci->at_req_context);
1012                 dma_trm_reset(&ohci->at_resp_context);
1013                 break;
1014
1015         case ISO_LISTEN_CHANNEL:
1016         {
1017                 u64 mask;
1018                 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1019                 int ir_legacy_active;
1020
1021                 if (arg<0 || arg>63) {
1022                         PRINT(KERN_ERR,
1023                               "%s: IS0 listen channel %d is out of range",
1024                               __FUNCTION__, arg);
1025                         return -EFAULT;
1026                 }
1027
1028                 mask = (u64)0x1<<arg;
1029
1030                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1031
1032                 if (ohci->ISO_channel_usage & mask) {
1033                         PRINT(KERN_ERR,
1034                               "%s: IS0 listen channel %d is already used",
1035                               __FUNCTION__, arg);
1036                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1037                         return -EFAULT;
1038                 }
1039
1040                 ir_legacy_active = ohci->ir_legacy_channels;
1041
1042                 ohci->ISO_channel_usage |= mask;
1043                 ohci->ir_legacy_channels |= mask;
1044
1045                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1046
1047                 if (!ir_legacy_active) {
1048                         if (ohci1394_register_iso_tasklet(ohci,
1049                                           &ohci->ir_legacy_tasklet) < 0) {
1050                                 PRINT(KERN_ERR, "No IR DMA context available");
1051                                 return -EBUSY;
1052                         }
1053
1054                         /* the IR context can be assigned to any DMA context
1055                          * by ohci1394_register_iso_tasklet */
1056                         d->ctx = ohci->ir_legacy_tasklet.context;
1057                         d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1058                                 32*d->ctx;
1059                         d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1060                                 32*d->ctx;
1061                         d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1062                         d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1063
1064                         initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1065
1066                         if (printk_ratelimit())
1067                                 DBGMSG("IR legacy activated");
1068                 }
1069
1070                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1071
1072                 if (arg>31)
1073                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1074                                   1<<(arg-32));
1075                 else
1076                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1077                                   1<<arg);
1078
1079                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1080                 DBGMSG("Listening enabled on channel %d", arg);
1081                 break;
1082         }
1083         case ISO_UNLISTEN_CHANNEL:
1084         {
1085                 u64 mask;
1086
1087                 if (arg<0 || arg>63) {
1088                         PRINT(KERN_ERR,
1089                               "%s: IS0 unlisten channel %d is out of range",
1090                               __FUNCTION__, arg);
1091                         return -EFAULT;
1092                 }
1093
1094                 mask = (u64)0x1<<arg;
1095
1096                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1097
1098                 if (!(ohci->ISO_channel_usage & mask)) {
1099                         PRINT(KERN_ERR,
1100                               "%s: IS0 unlisten channel %d is not used",
1101                               __FUNCTION__, arg);
1102                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1103                         return -EFAULT;
1104                 }
1105
1106                 ohci->ISO_channel_usage &= ~mask;
1107                 ohci->ir_legacy_channels &= ~mask;
1108
1109                 if (arg>31)
1110                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1111                                   1<<(arg-32));
1112                 else
1113                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1114                                   1<<arg);
1115
1116                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1117                 DBGMSG("Listening disabled on channel %d", arg);
1118
1119                 if (ohci->ir_legacy_channels == 0) {
1120                         stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1121                         DBGMSG("ISO legacy receive context stopped");
1122                 }
1123
1124                 break;
1125         }
1126         default:
1127                 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1128                         cmd);
1129                 break;
1130         }
1131         return retval;
1132 }
1133
1134 /***********************************
1135  * rawiso ISO reception            *
1136  ***********************************/
1137
1138 /*
1139   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1140   buffer is split into "blocks" (regions described by one DMA
1141   descriptor). Each block must be one page or less in size, and
1142   must not cross a page boundary.
1143
1144   There is one little wrinkle with buffer-fill mode: a packet that
1145   starts in the final block may wrap around into the first block. But
1146   the user API expects all packets to be contiguous. Our solution is
1147   to keep the very last page of the DMA buffer in reserve - if a
1148   packet spans the gap, we copy its tail into this page.
1149 */
1150
/* per-hpsb_iso private state for rawiso reception; hangs off
 * iso->hostdata and lives from recv_init until recv_shutdown */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (byte offsets, computed from
	 * the context number assigned at tasklet registration) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1192
1193 static void ohci_iso_recv_task(unsigned long data);
1194 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1195 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1196 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1197 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1198
/*
 * Set up ISO reception for one hpsb_iso: choose the DMA mode, size the
 * descriptor ring, allocate the DMA program, claim an IR context via the
 * iso tasklet machinery, and write the initial DMA program.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (or a too-small /
 * unusable buffer), -EBUSY if no IR DMA context is available.  On any
 * failure, partially-built state is torn down via ohci_iso_recv_shutdown.
 */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp: never let more than a quarter of the ring pass
		 * without an interrupt, and never go below one block */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		/* packet-per-buffer: one block per packet */
		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size (min 8) */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1329
1330 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1331 {
1332         struct ohci_iso_recv *recv = iso->hostdata;
1333
1334         /* disable interrupts */
1335         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1336
1337         /* halt DMA */
1338         ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1339 }
1340
1341 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1342 {
1343         struct ohci_iso_recv *recv = iso->hostdata;
1344
1345         if (recv->task_active) {
1346                 ohci_iso_recv_stop(iso);
1347                 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1348                 recv->task_active = 0;
1349         }
1350
1351         dma_prog_region_free(&recv->prog);
1352         kfree(recv);
1353         iso->hostdata = NULL;
1354 }
1355
1356 /* set up a "gapped" ring buffer DMA program */
1357 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1358 {
1359         struct ohci_iso_recv *recv = iso->hostdata;
1360         int blk;
1361
1362         /* address of 'branch' field in previous DMA descriptor */
1363         u32 *prev_branch = NULL;
1364
1365         for (blk = 0; blk < recv->nblocks; blk++) {
1366                 u32 control;
1367
1368                 /* the DMA descriptor */
1369                 struct dma_cmd *cmd = &recv->block[blk];
1370
1371                 /* offset of the DMA descriptor relative to the DMA prog buffer */
1372                 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1373
1374                 /* offset of this packet's data within the DMA buffer */
1375                 unsigned long buf_offset = blk * recv->buf_stride;
1376
1377                 if (recv->dma_mode == BUFFER_FILL_MODE) {
1378                         control = 2 << 28; /* INPUT_MORE */
1379                 } else {
1380                         control = 3 << 28; /* INPUT_LAST */
1381                 }
1382
1383                 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1384
1385                 /* interrupt on last block, and at intervals */
1386                 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1387                         control |= 3 << 20; /* want interrupt */
1388                 }
1389
1390                 control |= 3 << 18; /* enable branch to address */
1391                 control |= recv->buf_stride;
1392
1393                 cmd->control = cpu_to_le32(control);
1394                 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1395                 cmd->branchAddress = 0; /* filled in on next loop */
1396                 cmd->status = cpu_to_le32(recv->buf_stride);
1397
1398                 /* link the previous descriptor to this one */
1399                 if (prev_branch) {
1400                         *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1401                 }
1402
1403                 prev_branch = &cmd->branchAddress;
1404         }
1405
1406         /* the final descriptor's branch address and Z should be left at 0 */
1407 }
1408
1409 /* listen or unlisten to a specific channel (multi-channel mode only) */
1410 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1411 {
1412         struct ohci_iso_recv *recv = iso->hostdata;
1413         int reg, i;
1414
1415         if (channel < 32) {
1416                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1417                 i = channel;
1418         } else {
1419                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1420                 i = channel - 32;
1421         }
1422
1423         reg_write(recv->ohci, reg, (1 << i));
1424
1425         /* issue a dummy read to force all PCI writes to be posted immediately */
1426         mb();
1427         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1428 }
1429
1430 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1431 {
1432         struct ohci_iso_recv *recv = iso->hostdata;
1433         int i;
1434
1435         for (i = 0; i < 64; i++) {
1436                 if (mask & (1ULL << i)) {
1437                         if (i < 32)
1438                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1439                         else
1440                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1441                 } else {
1442                         if (i < 32)
1443                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1444                         else
1445                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1446                 }
1447         }
1448
1449         /* issue a dummy read to force all PCI writes to be posted immediately */
1450         mb();
1451         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1452 }
1453
/*
 * Program the context-control and context-match registers and start the
 * IR DMA context.
 *
 * cycle: -1 to start immediately, otherwise the bus cycle (mod 8000) to
 *        begin reception on; tag_mask: iso tag bits to match; sync: -1
 *        for none, otherwise the sy field to wait for on the first
 *        descriptor.
 * Returns 0 on success, -1 if the context failed to enter the run state.
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* wipe any stale context-control state before reprogramming */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	/* (1 << 31) selects buffer-fill reception */
	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1544
1545 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1546 {
1547         /* re-use the DMA descriptor for the block */
1548         /* by linking the previous descriptor to it */
1549
1550         int next_i = block;
1551         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1552
1553         struct dma_cmd *next = &recv->block[next_i];
1554         struct dma_cmd *prev = &recv->block[prev_i];
1555         
1556         /* ignore out-of-range requests */
1557         if ((block < 0) || (block > recv->nblocks))
1558                 return;
1559
1560         /* 'next' becomes the new end of the DMA chain,
1561            so disable branch and enable interrupt */
1562         next->branchAddress = 0;
1563         next->control |= cpu_to_le32(3 << 20);
1564         next->status = cpu_to_le32(recv->buf_stride);
1565
1566         /* link prev to next */
1567         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1568                                                                         sizeof(struct dma_cmd) * next_i)
1569                                           | 1); /* Z=1 */
1570
1571         /* disable interrupt on previous DMA descriptor, except at intervals */
1572         if ((prev_i % recv->block_irq_interval) == 0) {
1573                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1574         } else {
1575                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1576         }
1577         wmb();
1578
1579         /* wake up DMA in case it fell asleep */
1580         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1581 }
1582
/* Bufferfill-mode release: credit the bytes of a consumed packet back to
 * the receive buffer and recycle whole DMA blocks once a full block's
 * worth of space has been freed. */
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
                                             struct hpsb_iso_packet_info *info)
{
        /* release the memory where the packet was */
        recv->released_bytes += info->total_len;

        /* have we released enough memory for one block? */
        /* NOTE(review): the strict '>' only recycles a block once strictly
         * more than buf_stride bytes are pending, so exactly one block's
         * worth can remain retained — confirm whether '>=' was intended. */
        while (recv->released_bytes > recv->buf_stride) {
                ohci_iso_recv_release_block(recv, recv->block_reader);
                recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
                recv->released_bytes -= recv->buf_stride;
        }
}
1596
1597 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1598 {
1599         struct ohci_iso_recv *recv = iso->hostdata;
1600         if (recv->dma_mode == BUFFER_FILL_MODE) {
1601                 ohci_iso_recv_bufferfill_release(recv, info);
1602         } else {
1603                 ohci_iso_recv_release_block(recv, info - iso->infos);
1604         }
1605 }
1606
1607 /* parse all packets from blocks that have been fully received */
/* Walk the receive buffer from recv->dma_offset up to (but not into) the
 * block the controller is currently filling, extracting one iso packet
 * per iteration and reporting it to the hpsb core.  Packet layout is the
 * 4-byte iso header, padded payload, then 4 bytes of xferStatus/timeStamp
 * (OHCI spec section 10.6.1.1). */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;	/* safety counter against a corrupted buffer */
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* a length above the 4096-byte iso payload maximum means the
		 * parser has lost sync with the DMA stream */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp (low 16 bits of the trailing timeStamp word;
		 * only the 13-bit cycle count is kept) */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1714
1715 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1716 {
1717         int loop;
1718         struct ti_ohci *ohci = recv->ohci;
1719
1720         /* loop over all blocks */
1721         for (loop = 0; loop < recv->nblocks; loop++) {
1722
1723                 /* check block_dma to see if it's done */
1724                 struct dma_cmd *im = &recv->block[recv->block_dma];
1725
1726                 /* check the DMA descriptor for new writes to xferStatus */
1727                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1728
1729                 /* rescount is the number of bytes *remaining to be written* in the block */
1730                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1731
1732                 unsigned char event = xferstatus & 0x1F;
1733
1734                 if (!event) {
1735                         /* nothing has happened to this block yet */
1736                         break;
1737                 }
1738
1739                 if (event != 0x11) {
1740                         atomic_inc(&iso->overflows);
1741                         PRINT(KERN_ERR,
1742                               "IR DMA error - OHCI error code 0x%02x\n", event);
1743                 }
1744
1745                 if (rescount != 0) {
1746                         /* the card is still writing to this block;
1747                            we can't touch it until it's done */
1748                         break;
1749                 }
1750
1751                 /* OK, the block is finished... */
1752
1753                 /* sync our view of the block */
1754                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1755
1756                 /* reset the DMA descriptor */
1757                 im->status = recv->buf_stride;
1758
1759                 /* advance block_dma */
1760                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1761
1762                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1763                         atomic_inc(&iso->overflows);
1764                         DBGMSG("ISO reception overflow - "
1765                                "ran out of DMA blocks");
1766                 }
1767         }
1768
1769         /* parse any packets that have arrived */
1770         ohci_iso_recv_bufferfill_parse(iso, recv);
1771 }
1772
1773 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1774 {
1775         int count;
1776         int wake = 0;
1777         struct ti_ohci *ohci = recv->ohci;
1778
1779         /* loop over the entire buffer */
1780         for (count = 0; count < recv->nblocks; count++) {
1781                 u32 packet_len = 0;
1782
1783                 /* pointer to the DMA descriptor */
1784                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1785
1786                 /* check the DMA descriptor for new writes to xferStatus */
1787                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1788                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1789
1790                 unsigned char event = xferstatus & 0x1F;
1791
1792                 if (!event) {
1793                         /* this packet hasn't come in yet; we are done for now */
1794                         goto out;
1795                 }
1796
1797                 if (event == 0x11) {
1798                         /* packet received successfully! */
1799
1800                         /* rescount is the number of bytes *remaining* in the packet buffer,
1801                            after the packet was written */
1802                         packet_len = recv->buf_stride - rescount;
1803
1804                 } else if (event == 0x02) {
1805                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1806                 } else if (event) {
1807                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1808                 }
1809
1810                 /* sync our view of the buffer */
1811                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1812
1813                 /* record the per-packet info */
1814                 {
1815                         /* iso header is 8 bytes ahead of the data payload */
1816                         unsigned char *hdr;
1817
1818                         unsigned int offset;
1819                         unsigned short cycle;
1820                         unsigned char channel, tag, sy;
1821
1822                         offset = iso->pkt_dma * recv->buf_stride;
1823                         hdr = iso->data_buf.kvirt + offset;
1824
1825                         /* skip iso header */
1826                         offset += 8;
1827                         packet_len -= 8;
1828
1829                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1830                         channel = hdr[5] & 0x3F;
1831                         tag = hdr[5] >> 6;
1832                         sy = hdr[4] & 0xF;
1833
1834                         hpsb_iso_packet_received(iso, offset, packet_len,
1835                                         recv->buf_stride, cycle, channel, tag, sy);
1836                 }
1837
1838                 /* reset the DMA descriptor */
1839                 il->status = recv->buf_stride;
1840
1841                 wake = 1;
1842                 recv->block_dma = iso->pkt_dma;
1843         }
1844
1845 out:
1846         if (wake)
1847                 hpsb_iso_wake(iso);
1848 }
1849
1850 static void ohci_iso_recv_task(unsigned long data)
1851 {
1852         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1853         struct ohci_iso_recv *recv = iso->hostdata;
1854
1855         if (recv->dma_mode == BUFFER_FILL_MODE)
1856                 ohci_iso_recv_bufferfill_task(iso, recv);
1857         else
1858                 ohci_iso_recv_packetperbuf_task(iso, recv);
1859 }
1860
1861 /***********************************
1862  * rawiso ISO transmission         *
1863  ***********************************/
1864
/* per-channel state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;		/* the host controller */
	struct dma_prog_region prog;	/* DMA program: one iso_xmit_cmd per packet slot */
	struct ohci1394_iso_tasklet task; /* bottom half; task.context is the IT context number */
	int task_active;		/* nonzero once the tasklet is registered */

	/* register offsets for this IT context (contexts are 16 bytes apart) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1875
1876 /* transmission DMA program:
1877    one OUTPUT_MORE_IMMEDIATE for the IT header
1878    one OUTPUT_LAST for the buffer data */
1879
/* one DMA program entry per transmitted packet (see comment above:
 * OUTPUT_MORE_IMMEDIATE for the IT header, OUTPUT_LAST for the payload) */
struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate; /* descriptor carrying immediate data */
	u8 iso_hdr[8];			/* the 8-byte iso packet header, embedded in the descriptor */
	u32 unused[2];			/* padding up to the following descriptor */
	struct dma_cmd output_last;	/* points at the payload in the user data buffer */
};
1886
1887 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1888 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1889 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1890 static void ohci_iso_xmit_task(unsigned long data);
1891
/*
 * Set up rawiso transmission for one hpsb_iso channel: allocate the
 * per-channel state and the DMA program (one iso_xmit_cmd per packet
 * slot), register the iso tasklet (which assigns the IT context number),
 * and compute this context's register offsets.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if the
 * tasklet cannot be registered.  On error all partial state is torn down
 * via ohci_iso_xmit_shutdown().
 */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	/* init the region first so the error path can free it safely
	 * even if the allocation below never happens */
	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1936
/* Stop IT DMA for this channel: mask the context's interrupt, then halt
 * the context.  'ohci' is needed by the PRINT macro. */
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}
1953
/* Tear down rawiso transmission state: stop DMA and unregister the
 * tasklet if active, then free the DMA program and the state itself.
 * Also used by the ohci_iso_xmit_init() error path, where the tasklet
 * may not yet be registered (task_active == 0). */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
1968
/* IT tasklet: reap transmitted packets.  Scans the DMA program starting
 * at pkt_dma, reporting each packet whose xferStatus the controller has
 * filled in, and clears the status word so the slot reads as idle when
 * it is reused. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		/* NOTE(review): iso->pkt_dma is re-read each iteration;
		 * hpsb_iso_packet_sent() is assumed to advance it — confirm
		 * in the hpsb core. */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (13-bit cycle count from the timeStamp word) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2013
/*
 * Append one packet to the IT DMA program: build an
 * OUTPUT_MORE_IMMEDIATE descriptor carrying the 8-byte iso header plus
 * an OUTPUT_LAST descriptor pointing at the payload, then link the
 * previous end of the chain to the new descriptor and wake the context.
 *
 * Returns 0 on success, or -EINVAL if the payload would cross a page
 * boundary (no OUTPUT_MORE descriptor support).
 */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	/* local copies so the user can't change them under us via the
	   mmap'ed info area */
	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor
	   (0x02000008: 8 bytes of immediate data - the iso header) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* make sure the descriptor writes are visible before waking DMA */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2109
/*
 * Start IT DMA on the context.  @cycle is the iso cycle to begin
 * transmission on (mod 8000), or -1 to start as soon as possible.
 * Returns 0 on success, -1 if the context failed to enter RUN state.
 */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable plus the 15-bit cycle in bits 16-30 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2158
/*
 * Dispatch rawiso control commands from the hpsb core to the xmit/recv
 * helpers above.  The meaning of @arg depends on @cmd (packet info
 * pointer, cycle number, arg array, channel number, or channel-mask
 * pointer).  Returns the helper's result, 0 for void operations, or
 * -EINVAL for an unknown command.
 */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{

	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* args: cycle, tag mask, sync */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the tasklet body synchronously to drain pending packets */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2211
2212 /***************************************
2213  * IEEE-1394 functionality section END *
2214  ***************************************/
2215
2216
2217 /********************************************************
2218  * Global stuff (interrupt handler, init/shutdown code) *
2219  ********************************************************/
2220
/*
 * Reset an async transmit DMA context: stop the hardware context, reset
 * the software ring state under the lock, and complete every packet that
 * was queued on it with ACKX_ABORTED.  The subsystem callbacks are run
 * outside the lock (hpsb_packet_sent may re-enter the driver).
 */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	/* collect both in-flight and not-yet-programmed packets */
	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* reset the descriptor ring bookkeeping to "empty" */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2259
2260 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2261                                        quadlet_t rx_event,
2262                                        quadlet_t tx_event)
2263 {
2264         struct ohci1394_iso_tasklet *t;
2265         unsigned long mask;
2266         unsigned long flags;
2267
2268         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2269
2270         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2271                 mask = 1 << t->context;
2272
2273                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2274                         tasklet_schedule(&t->tasklet);
2275                 else if (rx_event & mask)
2276                         tasklet_schedule(&t->tasklet);
2277         }
2278
2279         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2280 }
2281
2282 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2283 {
2284         quadlet_t event, node_id;
2285         struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2286         struct hpsb_host *host = ohci->host;
2287         int phyid = -1, isroot = 0;
2288         unsigned long flags;
2289
2290         /* Read and clear the interrupt event register.  Don't clear
2291          * the busReset event, though. This is done when we get the
2292          * selfIDComplete interrupt. */
2293         spin_lock_irqsave(&ohci->event_lock, flags);
2294         event = reg_read(ohci, OHCI1394_IntEventClear);
2295         reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2296         spin_unlock_irqrestore(&ohci->event_lock, flags);
2297
2298         if (!event)
2299                 return IRQ_NONE;
2300
2301         /* If event is ~(u32)0 cardbus card was ejected.  In this case
2302          * we just return, and clean up in the ohci1394_pci_remove
2303          * function. */
2304         if (event == ~(u32) 0) {
2305                 DBGMSG("Device removed.");
2306                 return IRQ_NONE;
2307         }
2308
2309         DBGMSG("IntEvent: %08x", event);
2310
2311         if (event & OHCI1394_unrecoverableError) {
2312                 int ctx;
2313                 PRINT(KERN_ERR, "Unrecoverable error!");
2314
2315                 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2316                         PRINT(KERN_ERR, "Async Req Tx Context died: "
2317                                 "ctrl[%08x] cmdptr[%08x]",
2318                                 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2319                                 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2320
2321                 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2322                         PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2323                                 "ctrl[%08x] cmdptr[%08x]",
2324                                 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2325                                 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2326
2327                 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2328                         PRINT(KERN_ERR, "Async Req Rcv Context died: "
2329                                 "ctrl[%08x] cmdptr[%08x]",
2330                                 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2331                                 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2332
2333                 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2334                         PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2335                                 "ctrl[%08x] cmdptr[%08x]",
2336                                 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2337                                 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2338
2339                 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2340                         if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2341                                 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2342                                         "ctrl[%08x] cmdptr[%08x]", ctx,
2343                                         reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2344                                         reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2345                 }
2346
2347                 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2348                         if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2349                                 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2350                                         "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2351                                         reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2352                                         reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2353                                         reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2354                 }
2355
2356                 event &= ~OHCI1394_unrecoverableError;
2357         }
2358         if (event & OHCI1394_postedWriteErr) {
2359                 PRINT(KERN_ERR, "physical posted write error");
2360                 /* no recovery strategy yet, had to involve protocol drivers */
2361                 event &= ~OHCI1394_postedWriteErr;
2362         }
2363         if (event & OHCI1394_cycleTooLong) {
2364                 if(printk_ratelimit())
2365                         PRINT(KERN_WARNING, "isochronous cycle too long");
2366                 else
2367                         DBGMSG("OHCI1394_cycleTooLong");
2368                 reg_write(ohci, OHCI1394_LinkControlSet,
2369                           OHCI1394_LinkControl_CycleMaster);
2370                 event &= ~OHCI1394_cycleTooLong;
2371         }
2372         if (event & OHCI1394_cycleInconsistent) {
2373                 /* We subscribe to the cycleInconsistent event only to
2374                  * clear the corresponding event bit... otherwise,
2375                  * isochronous cycleMatch DMA won't work. */
2376                 DBGMSG("OHCI1394_cycleInconsistent");
2377                 event &= ~OHCI1394_cycleInconsistent;
2378         }
2379         if (event & OHCI1394_busReset) {
2380                 /* The busReset event bit can't be cleared during the
2381                  * selfID phase, so we disable busReset interrupts, to
2382                  * avoid burying the cpu in interrupt requests. */
2383                 spin_lock_irqsave(&ohci->event_lock, flags);
2384                 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2385
2386                 if (ohci->check_busreset) {
2387                         int loop_count = 0;
2388
2389                         udelay(10);
2390
2391                         while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2392                                 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2393
2394                                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2395                                 udelay(10);
2396                                 spin_lock_irqsave(&ohci->event_lock, flags);
2397
2398                                 /* The loop counter check is to prevent the driver
2399                                  * from remaining in this state forever. For the
2400                                  * initial bus reset, the loop continues for ever
2401                                  * and the system hangs, until some device is plugged-in
2402                                  * or out manually into a port! The forced reset seems
2403                                  * to solve this problem. This mainly effects nForce2. */
2404                                 if (loop_count > 10000) {
2405                                         ohci_devctl(host, RESET_BUS, LONG_RESET);
2406                                         DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2407                                         loop_count = 0;
2408                                 }
2409
2410                                 loop_count++;
2411                         }
2412                 }
2413                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2414                 if (!host->in_bus_reset) {
2415                         DBGMSG("irq_handler: Bus reset requested");
2416
2417                         /* Subsystem call */
2418                         hpsb_bus_reset(ohci->host);
2419                 }
2420                 event &= ~OHCI1394_busReset;
2421         }
2422         if (event & OHCI1394_reqTxComplete) {
2423                 struct dma_trm_ctx *d = &ohci->at_req_context;
2424                 DBGMSG("Got reqTxComplete interrupt "
2425                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2426                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2427                         ohci1394_stop_context(ohci, d->ctrlClear,
2428                                               "reqTxComplete");
2429                 else
2430                         dma_trm_tasklet((unsigned long)d);
2431                         //tasklet_schedule(&d->task);
2432                 event &= ~OHCI1394_reqTxComplete;
2433         }
2434         if (event & OHCI1394_respTxComplete) {
2435                 struct dma_trm_ctx *d = &ohci->at_resp_context;
2436                 DBGMSG("Got respTxComplete interrupt "
2437                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2438                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2439                         ohci1394_stop_context(ohci, d->ctrlClear,
2440                                               "respTxComplete");
2441                 else
2442                         tasklet_schedule(&d->task);
2443                 event &= ~OHCI1394_respTxComplete;
2444         }
2445         if (event & OHCI1394_RQPkt) {
2446                 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2447                 DBGMSG("Got RQPkt interrupt status=0x%08X",
2448                        reg_read(ohci, d->ctrlSet));
2449                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2450                         ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2451                 else
2452                         tasklet_schedule(&d->task);
2453                 event &= ~OHCI1394_RQPkt;
2454         }
2455         if (event & OHCI1394_RSPkt) {
2456                 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2457                 DBGMSG("Got RSPkt interrupt status=0x%08X",
2458                        reg_read(ohci, d->ctrlSet));
2459                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2460                         ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2461                 else
2462                         tasklet_schedule(&d->task);
2463                 event &= ~OHCI1394_RSPkt;
2464         }
2465         if (event & OHCI1394_isochRx) {
2466                 quadlet_t rx_event;
2467
2468                 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2469                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2470                 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2471                 event &= ~OHCI1394_isochRx;
2472         }
2473         if (event & OHCI1394_isochTx) {
2474                 quadlet_t tx_event;
2475
2476                 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2477                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2478                 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2479                 event &= ~OHCI1394_isochTx;
2480         }
2481         if (event & OHCI1394_selfIDComplete) {
2482                 if (host->in_bus_reset) {
2483                         node_id = reg_read(ohci, OHCI1394_NodeID);
2484
2485                         if (!(node_id & 0x80000000)) {
2486                                 PRINT(KERN_ERR,
2487                                       "SelfID received, but NodeID invalid "
2488                                       "(probably new bus reset occurred): %08X",
2489                                       node_id);
2490                                 goto selfid_not_valid;
2491                         }
2492
2493                         phyid =  node_id & 0x0000003f;
2494                         isroot = (node_id & 0x40000000) != 0;
2495
2496                         DBGMSG("SelfID interrupt received "
2497                               "(phyid %d, %s)", phyid,
2498                               (isroot ? "root" : "not root"));
2499
2500                         handle_selfid(ohci, host, phyid, isroot);
2501
2502                         /* Clear the bus reset event and re-enable the
2503                          * busReset interrupt.  */
2504                         spin_lock_irqsave(&ohci->event_lock, flags);
2505                         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2506                         reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2507                         spin_unlock_irqrestore(&ohci->event_lock, flags);
2508
2509                         /* Turn on phys dma reception.
2510                          *
2511                          * TODO: Enable some sort of filtering management.
2512                          */
2513                         if (phys_dma) {
2514                                 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2515                                           0xffffffff);
2516                                 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2517                                           0xffffffff);
2518                         }
2519
2520                         DBGMSG("PhyReqFilter=%08x%08x",
2521                                reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2522                                reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2523
2524                         hpsb_selfid_complete(host, phyid, isroot);
2525                 } else
2526                         PRINT(KERN_ERR,
2527                               "SelfID received outside of bus reset sequence");
2528
2529 selfid_not_valid:
2530                 event &= ~OHCI1394_selfIDComplete;
2531         }
2532
2533         /* Make sure we handle everything, just in case we accidentally
2534          * enabled an interrupt that we didn't write a handler for.  */
2535         if (event)
2536                 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2537                       event);
2538
2539         return IRQ_HANDLED;
2540 }
2541
2542 /* Put the buffer back into the dma context */
2543 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2544 {
2545         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2546         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2547
2548         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2549         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2550         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2551         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2552
2553         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2554          * context program descriptors before it sees the wakeup bit set. */
2555         wmb();
2556         
2557         /* wake up the dma context if necessary */
2558         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2559                 PRINT(KERN_INFO,
2560                       "Waking dma ctx=%d ... processing is probably too slow",
2561                       d->ctx);
2562         }
2563
2564         /* do this always, to avoid race condition */
2565         reg_write(ohci, d->ctrlSet, 0x1000);
2566 }
2567
/* Conditionally convert an incoming little-endian quadlet to CPU order.
 * When "noswap" is set (ohci->no_swap_incoming) the controller already
 * delivers the quadlet in CPU order and it is passed through untouched.
 * Arguments are fully parenthesized so the macro expands safely when
 * given compound expressions. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))

/* Total received packet size (header + trailer quadlet) indexed by
 * transaction code: 0 means "block packet — read the data_length field
 * from the header", -1 means the tcode is not expected here. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
			    -1, 0, -1, 0, -1, -1, 16, -1};
2573
2574 /*
2575  * Determine the length of a packet in the buffer
2576  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2577  */
2578 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2579                                 quadlet_t *buf_ptr, int offset,
2580                                 unsigned char tcode, int noswap)
2581 {
2582         int length = -1;
2583
2584         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2585                 length = TCODE_SIZE[tcode];
2586                 if (length == 0) {
2587                         if (offset + 12 >= d->buf_size) {
2588                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2589                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2590                         } else {
2591                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2592                         }
2593                         length += 20;
2594                 }
2595         } else if (d->type == DMA_CTX_ISO) {
2596                 /* Assumption: buffer fill mode with header/trailer */
2597                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2598         }
2599
2600         if (length > 0 && length % 4)
2601                 length += 4 - (length % 4);
2602
2603         return length;
2604 }
2605
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive buffer ring from the position saved in d->buf_ind /
 * d->buf_offset, extracts complete packets (reassembling packets that
 * straddle buffer boundaries into d->spb), hands them to the ieee1394
 * core, and recycles consumed buffers back to the controller.  Runs
 * under d->lock for the whole pass. */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume where the previous invocation left off in the ring. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* resCount (low 16 bits of the descriptor status) is how many bytes
	 * the controller has NOT yet written into this buffer, so the
	 * unprocessed amount is buf_size - resCount - already-consumed. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				/* Save position so a later restart is sane. */
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Reassemble into the split buffer (d->spb),
			 * recycling each fully-consumed ring buffer back to
			 * the controller as we go. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			/* Copy out, then advance within the current buffer;
			 * if we consumed it exactly, recycle and move on. */
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				header_le32_to_cpu(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(d->spb[0]>>10)&0x3f);

			/* ack = 1 iff the trailer's ack field is 0x11
			 * (NOTE(review): presumably ack_complete — confirm
			 * against the ieee1394 core's ack definitions). */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailer/xferStatus quadlet. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Recompute how much unread data remains in the (possibly
		 * new) current buffer before looping. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Remember ring position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2744
/* Bottom half that processes sent packets.
 *
 * Walks the FIFO of in-flight transmit packets in order, stopping at the
 * first one the controller has not completed yet (status == 0).  For each
 * completed packet it derives an ack code, notifies the ieee1394 core,
 * unmaps any payload, and frees the descriptor slot; finally it flushes
 * queued packets into the newly freed slots.  Runs under d->lock. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* With a payload (and not raw) the completion status is in
		 * the last descriptor of the program; otherwise in the
		 * first.  The upper 16 status bits are xferStatus. */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* Bit 4 set means bits 0-3 hold a real IEEE 1394 ack code;
		 * otherwise the low 5 bits are an OHCI evt_* error that we
		 * translate into the stack's ACKX_* pseudo-acks. */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		/* Hand completion back to the stack, then release the
		 * descriptor slot. */
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		/* NOTE(review): cpu_to_le32 on end.address looks like it
		 * should be le32_to_cpu (the descriptor field is stored
		 * little-endian); the resulting bytes are identical since
		 * the swap is its own inverse — confirm before changing. */
		if (datasize)
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* Move any queued packets into the slots we just freed. */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2870
2871 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2872 {
2873         if (d->ctrlClear) {
2874                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2875
2876                 if (d->type == DMA_CTX_ISO) {
2877                         /* disable interrupts */
2878                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2879                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2880                 } else {
2881                         tasklet_kill(&d->task);
2882                 }
2883         }
2884 }
2885
2886
2887 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2888 {
2889         int i;
2890         struct ti_ohci *ohci = d->ohci;
2891
2892         if (ohci == NULL)
2893                 return;
2894
2895         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2896
2897         if (d->buf_cpu) {
2898                 for (i=0; i<d->num_desc; i++)
2899                         if (d->buf_cpu[i] && d->buf_bus[i])
2900                                 pci_free_consistent(
2901                                         ohci->dev, d->buf_size,
2902                                         d->buf_cpu[i], d->buf_bus[i]);
2903                 kfree(d->buf_cpu);
2904                 kfree(d->buf_bus);
2905         }
2906         if (d->prg_cpu) {
2907                 for (i=0; i<d->num_desc; i++)
2908                         if (d->prg_cpu[i] && d->prg_bus[i])
2909                                 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2910                                               d->prg_bus[i]);
2911                 pci_pool_destroy(d->prg_pool);
2912                 kfree(d->prg_cpu);
2913                 kfree(d->prg_bus);
2914         }
2915         kfree(d->spb);
2916
2917         /* Mark this context as freed. */
2918         d->ohci = NULL;
2919 }
2920
2921 static int
2922 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2923                   enum context_type type, int ctx, int num_desc,
2924                   int buf_size, int split_buf_size, int context_base)
2925 {
2926         int i, len;
2927         static int num_allocs;
2928         static char pool_name[20];
2929
2930         d->ohci = ohci;
2931         d->type = type;
2932         d->ctx = ctx;
2933
2934         d->num_desc = num_desc;
2935         d->buf_size = buf_size;
2936         d->split_buf_size = split_buf_size;
2937
2938         d->ctrlSet = 0;
2939         d->ctrlClear = 0;
2940         d->cmdPtr = 0;
2941
2942         d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2943         d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2944
2945         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2946                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2947                 free_dma_rcv_ctx(d);
2948                 return -ENOMEM;
2949         }
2950
2951         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2952         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2953
2954         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2955                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2956                 free_dma_rcv_ctx(d);
2957                 return -ENOMEM;
2958         }
2959
2960         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2961
2962         if (d->spb == NULL) {
2963                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2964                 free_dma_rcv_ctx(d);
2965                 return -ENOMEM;
2966         }
2967         
2968         len = sprintf(pool_name, "ohci1394_rcv_prg");
2969         sprintf(pool_name+len, "%d", num_allocs);
2970         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2971                                 sizeof(struct dma_cmd), 4, 0);
2972         if(d->prg_pool == NULL)
2973         {
2974                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2975                 free_dma_rcv_ctx(d);
2976                 return -ENOMEM;
2977         }
2978         num_allocs++;
2979
2980         for (i=0; i<d->num_desc; i++) {
2981                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2982                                                      d->buf_size,
2983                                                      d->buf_bus+i);
2984
2985                 if (d->buf_cpu[i] != NULL) {
2986                         memset(d->buf_cpu[i], 0, d->buf_size);
2987                 } else {
2988                         PRINT(KERN_ERR,
2989                               "Failed to allocate dma buffer");
2990                         free_dma_rcv_ctx(d);
2991                         return -ENOMEM;
2992                 }
2993
2994                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2995
2996                 if (d->prg_cpu[i] != NULL) {
2997                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2998                 } else {
2999                         PRINT(KERN_ERR,
3000                               "Failed to allocate dma prg");
3001                         free_dma_rcv_ctx(d);
3002                         return -ENOMEM;
3003                 }
3004         }
3005
3006         spin_lock_init(&d->lock);
3007
3008         if (type == DMA_CTX_ISO) {
3009                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3010                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3011                                           dma_rcv_tasklet, (unsigned long) d);
3012         } else {
3013                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3014                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3015                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3016
3017                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3018         }
3019
3020         return 0;
3021 }
3022
3023 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3024 {
3025         int i;
3026         struct ti_ohci *ohci = d->ohci;
3027
3028         if (ohci == NULL)
3029                 return;
3030
3031         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3032
3033         if (d->prg_cpu) {
3034                 for (i=0; i<d->num_desc; i++)
3035                         if (d->prg_cpu[i] && d->prg_bus[i])
3036                                 pci_pool_free(d->prg_pool, d->prg_cpu[i],
3037                                               d->prg_bus[i]);
3038                 pci_pool_destroy(d->prg_pool);
3039                 kfree(d->prg_cpu);
3040                 kfree(d->prg_bus);
3041         }
3042
3043         /* Mark this context as freed. */
3044         d->ohci = NULL;
3045 }
3046
3047 static int
3048 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3049                   enum context_type type, int ctx, int num_desc,
3050                   int context_base)
3051 {
3052         int i, len;
3053         static char pool_name[20];
3054         static int num_allocs=0;
3055
3056         d->ohci = ohci;
3057         d->type = type;
3058         d->ctx = ctx;
3059         d->num_desc = num_desc;
3060         d->ctrlSet = 0;
3061         d->ctrlClear = 0;
3062         d->cmdPtr = 0;
3063
3064         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3065         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3066
3067         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3068                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3069                 free_dma_trm_ctx(d);
3070                 return -ENOMEM;
3071         }
3072
3073         len = sprintf(pool_name, "ohci1394_trm_prg");
3074         sprintf(pool_name+len, "%d", num_allocs);
3075         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3076                                 sizeof(struct at_dma_prg), 4, 0);
3077         if (d->prg_pool == NULL) {
3078                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3079                 free_dma_trm_ctx(d);
3080                 return -ENOMEM;
3081         }
3082         num_allocs++;
3083
3084         for (i = 0; i < d->num_desc; i++) {
3085                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3086
3087                 if (d->prg_cpu[i] != NULL) {
3088                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3089                 } else {
3090                         PRINT(KERN_ERR,
3091                               "Failed to allocate at dma prg");
3092                         free_dma_trm_ctx(d);
3093                         return -ENOMEM;
3094                 }
3095         }
3096
3097         spin_lock_init(&d->lock);
3098
3099         /* initialize tasklet */
3100         if (type == DMA_CTX_ISO) {
3101                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3102                                           dma_trm_tasklet, (unsigned long) d);
3103                 if (ohci1394_register_iso_tasklet(ohci,
3104                                                   &ohci->it_legacy_tasklet) < 0) {
3105                         PRINT(KERN_ERR, "No IT DMA context available");
3106                         free_dma_trm_ctx(d);
3107                         return -EBUSY;
3108                 }
3109
3110                 /* IT can be assigned to any context by register_iso_tasklet */
3111                 d->ctx = ohci->it_legacy_tasklet.context;
3112                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3113                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3114                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3115         } else {
3116                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3117                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3118                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3119                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3120         }
3121
3122         return 0;
3123 }
3124
/* Load a new config ROM image into the controller: the header and
 * bus-options quadlets go into their dedicated registers (converted to
 * host byte order), and the whole image is copied into the DMA-visible
 * config ROM buffer allocated at probe time. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3134
3135
3136 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3137                                  quadlet_t data, quadlet_t compare)
3138 {
3139         struct ti_ohci *ohci = host->hostdata;
3140         int i;
3141
3142         reg_write(ohci, OHCI1394_CSRData, data);
3143         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3144         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3145
3146         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3147                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3148                         break;
3149
3150                 mdelay(1);
3151         }
3152
3153         return reg_read(ohci, OHCI1394_CSRData);
3154 }
3155
/* Host driver operations this controller exports to the ieee1394 core. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3165
3166 /***********************************
3167  * PCI Driver Interface functions  *
3168  ***********************************/
3169
/* Print an error, tear down whatever probe has set up so far (driven by
 * ohci->init_state via ohci1394_pci_remove()), and return ERR from the
 * probe function.  Only usable inside ohci1394_pci_probe(), which
 * #undefs it at its end. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3176
/*
 * Probe one OHCI controller: allocate the hpsb_host, claim and map the
 * MMIO register window, allocate all DMA contexts and buffers, hook the
 * (shared) interrupt, initialize the hardware, and finally register the
 * host with the ieee1394 core.
 *
 * ohci->init_state records the last step that completed; on any failure
 * the FAIL() macro calls ohci1394_pci_remove(), which undoes exactly the
 * steps reached so far, and returns the error.
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	resource_size_t ohci_base;

#ifdef CONFIG_PPC_PMAC
	/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
		      (unsigned long long)pci_resource_len(dev, 0));

	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
				OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
			(unsigned long long)ohci_base,
			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
		      &ohci->selfid_buf_bus);
	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to IRQF_SHARED we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	if (phys_dma) {
		host->low_addr_space =
			(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
		if (!host->low_addr_space)
			host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
	}
	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3400
/*
 * Undo probe.  ohci->init_state records the last initialization step
 * that completed; each case below undoes one step and then deliberately
 * falls through to the next, so teardown runs in reverse probe order.
 * Also reached from the FAIL() macro on any probe error.
 */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* Hold a reference so the host device stays valid during teardown. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
		/* fall through */

	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);
		/* fall through */

	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this.  */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);
		/* fall through */

	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		/* fall through */

	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		/* fall through */

	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);
		/* fall through */

	case OHCI_INIT_HAVE_MEM_REGION:
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
		/* fall through */

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip clock
	 * to save power on laptops */
	if (machine_is(powermac)) {
		struct device_node* ofn = pci_device_to_OF_node(ohci->dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3493
3494 #ifdef CONFIG_PM
/*
 * Suspend: quiesce the controller with the same shutdown sequence used
 * by ohci1394_pci_remove(), save PCI config state, and enter the
 * requested power state.  The PowerMac platform hook runs last so the
 * chip is still powered while its registers are being written.
 */
static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("suspend called");

	/* Clear the async DMA contexts and stop using the controller */
	hpsb_bus_reset(ohci->host);

	/* See ohci1394_pci_remove() for comments on this sequence */
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
		  0x00ff0000);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
	ohci_soft_reset(ohci);

	err = pci_save_state(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
		return err;
	}
	/* Failing to reach the low-power state is logged but not fatal. */
	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (err)
		DBGMSG("pci_set_power_state failed with %d", err);

/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}
3547
/*
 * Resume: the PowerMac platform hook runs first so the chip is powered
 * before any register access; then restore PCI state, re-enable the
 * device, and re-run the reset/LPS/initialize sequence from probe before
 * telling the ieee1394 core the host is back.
 */
static int ohci1394_pci_resume(struct pci_dev *pdev)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("resume called");

/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
		return err;
	}

	/* See ohci1394_pci_probe() for comments on this sequence */
	ohci_soft_reset(ohci);
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	mdelay(50);
	ohci_initialize(ohci);

	hpsb_resume_host(ohci->host);
	return 0;
}
3589 #endif /* CONFIG_PM */
3590
/* Match any device advertising the OHCI FireWire PCI class code,
 * regardless of vendor and device IDs. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_SERIAL_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },			/* sentinel */
};
3602
3603 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3604
/* PCI driver glue; suspend/resume hooks exist only with CONFIG_PM. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
#ifdef CONFIG_PM
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
#endif
};
3615
3616 /***********************************
3617  * OHCI1394 Video Interface        *
3618  ***********************************/
3619
3620 /* essentially the only purpose of this code is to allow another
3621    module to hook into ohci's interrupt handler */
3622
3623 /* returns zero if successful, one if DMA context is locked up */
3624 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3625 {
3626         int i=0;
3627
3628         /* stop the channel program if it's still running */
3629         reg_write(ohci, reg, 0x8000);
3630
3631         /* Wait until it effectively stops */
3632         while (reg_read(ohci, reg) & 0x400) {
3633                 i++;
3634                 if (i>5000) {
3635                         PRINT(KERN_ERR,
3636                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3637                         return 1;
3638                 }
3639
3640                 mb();
3641                 udelay(10);
3642         }
3643         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3644         return 0;
3645 }
3646
3647 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3648                                void (*func)(unsigned long), unsigned long data)
3649 {
3650         tasklet_init(&tasklet->tasklet, func, data);
3651         tasklet->type = type;
3652         /* We init the tasklet->link field, so we can list_del() it
3653          * without worrying whether it was added to the list or not. */
3654         INIT_LIST_HEAD(&tasklet->link);
3655 }
3656
3657 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3658                                   struct ohci1394_iso_tasklet *tasklet)
3659 {
3660         unsigned long flags, *usage;
3661         int n, i, r = -EBUSY;
3662
3663         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3664                 n = ohci->nb_iso_xmit_ctx;
3665                 usage = &ohci->it_ctx_usage;
3666         }
3667         else {
3668                 n = ohci->nb_iso_rcv_ctx;
3669                 usage = &ohci->ir_ctx_usage;
3670
3671                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3672                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3673                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3674                                 return r;
3675                         }
3676                 }
3677         }
3678
3679         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3680
3681         for (i = 0; i < n; i++)
3682                 if (!test_and_set_bit(i, usage)) {
3683                         tasklet->context = i;
3684                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3685                         r = 0;
3686                         break;
3687                 }
3688
3689         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3690
3691         return r;
3692 }
3693
3694 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3695                                      struct ohci1394_iso_tasklet *tasklet)
3696 {
3697         unsigned long flags;
3698
3699         tasklet_kill(&tasklet->tasklet);
3700
3701         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3702
3703         if (tasklet->type == OHCI_ISO_TRANSMIT)
3704                 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3705         else {
3706                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3707
3708                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3709                         clear_bit(0, &ohci->ir_multichannel_used);
3710                 }
3711         }
3712
3713         list_del(&tasklet->link);
3714
3715         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3716 }
3717
/* Exported so client modules can hook into this driver's ISO DMA
 * context handling and interrupt dispatch. */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3722
3723 /***********************************
3724  * General module initialization   *
3725  ***********************************/
3726
3727 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3728 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3729 MODULE_LICENSE("GPL");
3730
/* Module exit: unregister the PCI driver, which detaches all hosts. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3735
/* Module init: register with the PCI core; per-device setup happens in
 * ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3740
3741 /* Register before most other device drivers.
3742  * Useful for remote debugging via physical DMA, e.g. using firescope. */
3743 fs_initcall(ohci1394_init);
3744 module_exit(ohci1394_cleanup);