/*
 * ieee1394: ohci1394: remove unnecessary rcvPhyPkt bit flipping in
 * LinkControl register
 * drivers/ieee1394/ohci1394.c
 */
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionnalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI aswell)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/kernel.h>
86 #include <linux/list.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/wait.h>
90 #include <linux/errno.h>
91 #include <linux/module.h>
92 #include <linux/moduleparam.h>
93 #include <linux/pci.h>
94 #include <linux/fs.h>
95 #include <linux/poll.h>
96 #include <asm/byteorder.h>
97 #include <asm/atomic.h>
98 #include <asm/uaccess.h>
99 #include <linux/delay.h>
100 #include <linux/spinlock.h>
101
102 #include <asm/pgtable.h>
103 #include <asm/page.h>
104 #include <asm/irq.h>
105 #include <linux/types.h>
106 #include <linux/vmalloc.h>
107 #include <linux/init.h>
108
109 #ifdef CONFIG_PPC_PMAC
110 #include <asm/machdep.h>
111 #include <asm/pmac_feature.h>
112 #include <asm/prom.h>
113 #include <asm/pci-bridge.h>
114 #endif
115
116 #include "csr1212.h"
117 #include "ieee1394.h"
118 #include "ieee1394_types.h"
119 #include "hosts.h"
120 #include "dma.h"
121 #include "iso.h"
122 #include "ieee1394_core.h"
123 #include "highlevel.h"
124 #include "ohci1394.h"
125
126 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
127 #define OHCI1394_DEBUG
128 #endif
129
130 #ifdef DBGMSG
131 #undef DBGMSG
132 #endif
133
134 #ifdef OHCI1394_DEBUG
135 #define DBGMSG(fmt, args...) \
136 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
137 #else
138 #define DBGMSG(fmt, args...) do {} while (0)
139 #endif
140
141 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
142 #define OHCI_DMA_ALLOC(fmt, args...) \
143         HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
144                 ++global_outstanding_dmas, ## args)
145 #define OHCI_DMA_FREE(fmt, args...) \
146         HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
147                 --global_outstanding_dmas, ## args)
148 static int global_outstanding_dmas = 0;
149 #else
150 #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
151 #define OHCI_DMA_FREE(fmt, args...) do {} while (0)
152 #endif
153
154 /* print general (card independent) information */
155 #define PRINT_G(level, fmt, args...) \
156 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
157
158 /* print card specific information */
159 #define PRINT(level, fmt, args...) \
160 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
161
162 /* Module Parameters */
163 static int phys_dma = 1;
164 module_param(phys_dma, int, 0444);
165 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
166
167 static void dma_trm_tasklet(unsigned long data);
168 static void dma_trm_reset(struct dma_trm_ctx *d);
169
170 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
171                              enum context_type type, int ctx, int num_desc,
172                              int buf_size, int split_buf_size, int context_base);
173 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
174 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
175
176 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
177                              enum context_type type, int ctx, int num_desc,
178                              int context_base);
179
180 static void ohci1394_pci_remove(struct pci_dev *pdev);
181
182 #ifndef __LITTLE_ENDIAN
183 static const size_t hdr_sizes[] = {
184         3,      /* TCODE_WRITEQ */
185         4,      /* TCODE_WRITEB */
186         3,      /* TCODE_WRITE_RESPONSE */
187         0,      /* reserved */
188         3,      /* TCODE_READQ */
189         4,      /* TCODE_READB */
190         3,      /* TCODE_READQ_RESPONSE */
191         4,      /* TCODE_READB_RESPONSE */
192         1,      /* TCODE_CYCLE_START */
193         4,      /* TCODE_LOCK_REQUEST */
194         2,      /* TCODE_ISO_DATA */
195         4,      /* TCODE_LOCK_RESPONSE */
196                 /* rest is reserved or link-internal */
197 };
198
199 static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
200 {
201         size_t size;
202
203         if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
204                 return;
205
206         size = hdr_sizes[tcode];
207         while (size--)
208                 data[size] = le32_to_cpu(data[size]);
209 }
210 #else
211 #define header_le32_to_cpu(w,x) do {} while (0)
212 #endif /* !LITTLE_ENDIAN */
213
214 /***********************************
215  * IEEE-1394 functionality section *
216  ***********************************/
217
/* Read a PHY register through the OHCI1394_PhyControl register.
 * Serialized by ohci->phy_reg_lock; busy-waits up to OHCI_LOOP_COUNT ms
 * for the controller to complete the transfer.  Returns the register
 * value (on timeout, whatever the data field currently holds). */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
        int i;
        unsigned long flags;
        quadlet_t r;

        spin_lock_irqsave (&ohci->phy_reg_lock, flags);

        /* rdReg (bit 15) + register address in bits 8..11 starts the read */
        reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                /* rdDone (bit 31) set => read data is valid */
                if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
                        break;

                mdelay(1);
        }

        r = reg_read(ohci, OHCI1394_PhyControl);

        if (i >= OHCI_LOOP_COUNT)
                PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
                       r, r & 0x80000000, i);

        spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

        /* rdData lives in bits 16..23 */
        return (r & 0x00ff0000) >> 16;
}
245
246 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
247 {
248         int i;
249         unsigned long flags;
250         u32 r = 0;
251
252         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
253
254         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
255
256         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
257                 r = reg_read(ohci, OHCI1394_PhyControl);
258                 if (!(r & 0x00004000))
259                         break;
260
261                 mdelay(1);
262         }
263
264         if (i == OHCI_LOOP_COUNT)
265                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
266                        r, r & 0x00004000, i);
267
268         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
269
270         return;
271 }
272
273 /* Or's our value into the current value */
274 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
275 {
276         u8 old;
277
278         old = get_phy_reg (ohci, addr);
279         old |= data;
280         set_phy_reg (ohci, addr, old);
281
282         return;
283 }
284
/* Parse the self-ID buffer the controller filled after a bus reset and
 * hand each valid self-ID packet to the ieee1394 core.
 * @phyid:  our own physical ID (used only for debug output)
 * @isroot: unused here; kept for the caller's signature
 * On a reception error, triggers another bus reset (up to
 * OHCI1394_MAX_SELF_ID_ERRORS times) instead of forwarding packets. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; additionally the
	 * generation field (bits 16..23) must match the generation the
	 * controller stored in the buffer's first quadlet. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* PHY register 1, bit 6: initiate bus reset (IBR) */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Quadlet count from the byte size in bits 2..12; skip the leading
	 * status/generation quadlet. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet is one quadlet followed by its bit inverse. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* bits 24..29 carry the sender's physical ID */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
350
351 static void ohci_soft_reset(struct ti_ohci *ohci) {
352         int i;
353
354         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
355
356         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
357                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
358                         break;
359                 mdelay(1);
360         }
361         DBGMSG ("Soft reset finished");
362 }
363
364
/* Generate the dma receive prgs and start the context.
 * Builds a circular INPUT_MORE descriptor chain (one descriptor per
 * buffer), optionally raising an interrupt per filled buffer, programs
 * the context registers and sets the context running.
 * @generate_irq: non-zero to set DMA_CTL_IRQ on every descriptor. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the hardware context is halted before reprogramming */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* low bit 0x1 = Z value: one descriptor follows */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* last descriptor loops back to the first (Z=0) */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* resCount starts out equal to the full buffer size */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
424
425 /* Initialize the dma transmit context */
426 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
427 {
428         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
429
430         /* Stop the context */
431         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
432
433         d->prg_ind = 0;
434         d->sent_ind = 0;
435         d->free_prgs = d->num_desc;
436         d->branchAddrPtr = NULL;
437         INIT_LIST_HEAD(&d->fifo_list);
438         INIT_LIST_HEAD(&d->pending_list);
439
440         if (d->type == DMA_CTX_ISO) {
441                 /* enable interrupts */
442                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
443         }
444
445         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
446 }
447
448 /* Count the number of available iso contexts */
449 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
450 {
451         int i,ctx=0;
452         u32 tmp;
453
454         reg_write(ohci, reg, 0xffffffff);
455         tmp = reg_read(ohci, reg);
456
457         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
458
459         /* Count the number of contexts */
460         for (i=0; i<32; i++) {
461                 if (tmp & 1) ctx++;
462                 tmp >>= 1;
463         }
464         return ctx;
465 }
466
/* Global initialization: bring the controller from soft-reset state to
 * fully operational — program bus options, link control and PHY,
 * initialize all AR/AT DMA contexts, unmask interrupts, enable the
 * link, enable connected PHY ports, and sanity-check the EEPROM-derived
 * max packet size. Order of register writes follows the OHCI spec. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size from the max_rec field in BusOptions */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
	      "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY register 7 selects the port, register 8 reports it */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* bit 5 = connected; clearing bit 0 un-disables the port */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* wait (bounded) for the GUID ROM address reset to finish */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* dump the first 0x20 EEPROM bytes, one rdData byte each */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
638
/*
 * Insert a packet in the DMA fifo and generate the DMA prg.
 * Builds the OUTPUT_MORE/OUTPUT_LAST descriptor pair (or a single
 * OUTPUT_LAST for quadlet-only transmits) for @packet in context @d,
 * links it to the previous program via branchAddrPtr and queues the
 * packet on d->fifo_list.  Caller must hold d->lock and must have
 * checked d->free_prgs beforehand.
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		/* timeStamp = (cycleSeconds+1) mod 8 in bits 13..15,
		 * current cycleCount in bits 0..12 */
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* PHY packet: tcode 0xE, then the quadlet and its
			 * inverse straight from the caller's header */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* first OHCI header quadlet: speed code + low half
			 * of the IEEE-1394 header */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* OUTPUT_MORE carries the immediate header:
			 * 8 bytes for stream packets, 16 otherwise */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* chain the previous program to this one; low bits
			 * encode Z = number of descriptors (3 here) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* single OUTPUT_LAST descriptor; PHY packets carry
			 * one extra quadlet beyond header_size */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Z = 2 descriptors for a quadlet program */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		/* 8-byte immediate iso header, then the payload */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
817
818 /*
819  * This function fills the FIFO with the (eventual) pending packets
820  * and runs or wakes up the DMA prg if necessary.
821  *
822  * The function MUST be called with the d->lock held.
823  */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* program slot the first inserted packet will use */
	int z = 0;		/* Z (descriptor count) of the first inserted program */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: Z=3 when the packet carries a
		 * payload (begin + end descriptor), Z=2 for header-only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context)
	   0x8000 is the ContextControl run bit */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* CommandPtr must be loaded before setting run; the low
		 * bits of the descriptor address carry the Z value */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* set run */
	} else {
		/* Wake up the dma context if necessary (0x400 = active bit) */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* set wake */
	}

	return;
}
872
873 /* Transmission of an async or iso packet */
874 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
875 {
876         struct ti_ohci *ohci = host->hostdata;
877         struct dma_trm_ctx *d;
878         unsigned long flags;
879
880         if (packet->data_size > ohci->max_packet_size) {
881                 PRINT(KERN_ERR,
882                       "Transmit packet size %Zd is too big",
883                       packet->data_size);
884                 return -EOVERFLOW;
885         }
886
887         /* Decide whether we have an iso, a request, or a response packet */
888         if (packet->type == hpsb_raw)
889                 d = &ohci->at_req_context;
890         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
891                 /* The legacy IT DMA context is initialized on first
892                  * use.  However, the alloc cannot be run from
893                  * interrupt context, so we bail out if that is the
894                  * case. I don't see anyone sending ISO packets from
895                  * interrupt context anyway... */
896
897                 if (ohci->it_legacy_context.ohci == NULL) {
898                         if (in_interrupt()) {
899                                 PRINT(KERN_ERR,
900                                       "legacy IT context cannot be initialized during interrupt");
901                                 return -EINVAL;
902                         }
903
904                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
905                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
906                                               OHCI1394_IsoXmitContextBase) < 0) {
907                                 PRINT(KERN_ERR,
908                                       "error initializing legacy IT context");
909                                 return -ENOMEM;
910                         }
911
912                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
913                 }
914
915                 d = &ohci->it_legacy_context;
916         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
917                 d = &ohci->at_resp_context;
918         else
919                 d = &ohci->at_req_context;
920
921         spin_lock_irqsave(&d->lock,flags);
922
923         list_add_tail(&packet->driver_list, &d->pending_list);
924
925         dma_trm_flush(ohci, d);
926
927         spin_unlock_irqrestore(&d->lock,flags);
928
929         return 0;
930 }
931
/*
 * Miscellaneous host controls dispatched from the hpsb layer.
 *
 * RESET_BUS variants poke PHY registers 1 (IBR/RHB) and 5 (ISBR);
 * GET_CYCLE_COUNTER returns the cycle timer value via retval;
 * ISO_(UN)LISTEN_CHANNEL manage the legacy IR multi-channel masks
 * under IR_channel_lock.  Returns 0 (or a register value), -1 for an
 * unknown reset arg, or a negative errno from the ISO channel cases.
 */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* PHY reg 1: bit 6 = IBR (long reset), bit 7 = RHB (root
		 * holdoff); PHY reg 5: bit 6 = ISBR (short/arbitrated reset) */
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		/* raw cycle timer register value is returned via retval */
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		/* remember whether any legacy channel was already active
		 * before we add ours, so we only set up the context once.
		 * NOTE(review): ir_legacy_active is an int; if
		 * ir_legacy_channels is a 64-bit mask, channels >= 32
		 * could truncate here — verify the field's type */
		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		/* first legacy listener: claim an IR DMA context and
		 * initialize the legacy receive machinery (may sleep,
		 * hence outside the spinlock) */
		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				DBGMSG("IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		/* enable reception of this channel in the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		/* drop the channel from the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		/* last legacy listener gone: stop the receive context */
		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
1148
1149 /***********************************
1150  * rawiso ISO reception            *
1151  ***********************************/
1152
1153 /*
1154   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1155   buffer is split into "blocks" (regions described by one DMA
1156   descriptor). Each block must be one page or less in size, and
1157   must not cross a page boundary.
1158
1159   There is one little wrinkle with buffer-fill mode: a packet that
1160   starts in the final block may wrap around into the first block. But
1161   the user API expects all packets to be contiguous. Our solution is
1162   to keep the very last page of the DMA buffer in reserve - if a
1163   packet spans the gap, we copy its tail into this page.
1164 */
1165
/* Per-iso-handle state for rawiso reception; hangs off iso->hostdata. */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	/* bottom half that harvests completed blocks */
	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	/* buffer-fill: packets packed back-to-back in a ring of
	 * page-sized blocks; packet-per-buffer: one packet per
	 * fixed-stride slot (required to be buffer-fill for
	 * multichannel reception) */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes; one page in buffer-fill mode) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control register offsets for this context */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1207
1208 static void ohci_iso_recv_task(unsigned long data);
1209 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1210 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1211 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1212 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1213
/*
 * Set up rawiso reception: choose DMA mode, size the block ring,
 * allocate the descriptor program, claim an IR context, and write the
 * DMA program.  Returns 0 or a negative errno; on failure all partial
 * state is torn down via ohci_iso_recv_shutdown().
 */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* initialize enough state that the err path can clean up safely */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page
		 * NOTE(review): assumes buf_size >= PAGE_SIZE; a smaller
		 * buffer would wrap the unsigned subtraction — presumably
		 * guarded by the caller, verify */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp so we interrupt at least four times per ring, but
		 * never less often than once per flush */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		/* packet-per-buffer: one block per packet */
		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size, starting at 8 */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	/* claims a free IR DMA context; fails if all are in use */
	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1344
1345 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1346 {
1347         struct ohci_iso_recv *recv = iso->hostdata;
1348
1349         /* disable interrupts */
1350         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1351
1352         /* halt DMA */
1353         ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1354 }
1355
/*
 * Release all reception resources; also serves as the error path of
 * ohci_iso_recv_init(), so every step must tolerate partial setup.
 */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* stop DMA and release the IR context only if it was claimed */
	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	/* free the descriptor program, then the state block itself */
	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
1370
1371 /* set up a "gapped" ring buffer DMA program */
1372 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1373 {
1374         struct ohci_iso_recv *recv = iso->hostdata;
1375         int blk;
1376
1377         /* address of 'branch' field in previous DMA descriptor */
1378         u32 *prev_branch = NULL;
1379
1380         for (blk = 0; blk < recv->nblocks; blk++) {
1381                 u32 control;
1382
1383                 /* the DMA descriptor */
1384                 struct dma_cmd *cmd = &recv->block[blk];
1385
1386                 /* offset of the DMA descriptor relative to the DMA prog buffer */
1387                 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1388
1389                 /* offset of this packet's data within the DMA buffer */
1390                 unsigned long buf_offset = blk * recv->buf_stride;
1391
1392                 if (recv->dma_mode == BUFFER_FILL_MODE) {
1393                         control = 2 << 28; /* INPUT_MORE */
1394                 } else {
1395                         control = 3 << 28; /* INPUT_LAST */
1396                 }
1397
1398                 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1399
1400                 /* interrupt on last block, and at intervals */
1401                 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1402                         control |= 3 << 20; /* want interrupt */
1403                 }
1404
1405                 control |= 3 << 18; /* enable branch to address */
1406                 control |= recv->buf_stride;
1407
1408                 cmd->control = cpu_to_le32(control);
1409                 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1410                 cmd->branchAddress = 0; /* filled in on next loop */
1411                 cmd->status = cpu_to_le32(recv->buf_stride);
1412
1413                 /* link the previous descriptor to this one */
1414                 if (prev_branch) {
1415                         *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1416                 }
1417
1418                 prev_branch = &cmd->branchAddress;
1419         }
1420
1421         /* the final descriptor's branch address and Z should be left at 0 */
1422 }
1423
1424 /* listen or unlisten to a specific channel (multi-channel mode only) */
1425 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1426 {
1427         struct ohci_iso_recv *recv = iso->hostdata;
1428         int reg, i;
1429
1430         if (channel < 32) {
1431                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1432                 i = channel;
1433         } else {
1434                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1435                 i = channel - 32;
1436         }
1437
1438         reg_write(recv->ohci, reg, (1 << i));
1439
1440         /* issue a dummy read to force all PCI writes to be posted immediately */
1441         mb();
1442         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1443 }
1444
1445 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1446 {
1447         struct ohci_iso_recv *recv = iso->hostdata;
1448         int i;
1449
1450         for (i = 0; i < 64; i++) {
1451                 if (mask & (1ULL << i)) {
1452                         if (i < 32)
1453                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1454                         else
1455                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1456                 } else {
1457                         if (i < 32)
1458                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1459                         else
1460                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1461                 }
1462         }
1463
1464         /* issue a dummy read to force all PCI writes to be posted immediately */
1465         mb();
1466         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1467 }
1468
/*
 * Program and start the IR DMA context.
 *
 * cycle == -1 starts immediately, otherwise waits for the given cycle
 * (mod 8000); sync == -1 ignores the sy field, otherwise the first
 * descriptor waits for a matching sync.  tag_mask selects accepted
 * isochronous tags.  Returns 0 on success, -1 if the context refused
 * to enter the run state.
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* start from a clean ContextControl */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run (ContextControl bit 0x8000) */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1559
1560 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1561 {
1562         /* re-use the DMA descriptor for the block */
1563         /* by linking the previous descriptor to it */
1564
1565         int next_i = block;
1566         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1567
1568         struct dma_cmd *next = &recv->block[next_i];
1569         struct dma_cmd *prev = &recv->block[prev_i];
1570         
1571         /* ignore out-of-range requests */
1572         if ((block < 0) || (block > recv->nblocks))
1573                 return;
1574
1575         /* 'next' becomes the new end of the DMA chain,
1576            so disable branch and enable interrupt */
1577         next->branchAddress = 0;
1578         next->control |= cpu_to_le32(3 << 20);
1579         next->status = cpu_to_le32(recv->buf_stride);
1580
1581         /* link prev to next */
1582         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1583                                                                         sizeof(struct dma_cmd) * next_i)
1584                                           | 1); /* Z=1 */
1585
1586         /* disable interrupt on previous DMA descriptor, except at intervals */
1587         if ((prev_i % recv->block_irq_interval) == 0) {
1588                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1589         } else {
1590                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1591         }
1592         wmb();
1593
1594         /* wake up DMA in case it fell asleep */
1595         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1596 }
1597
1598 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1599                                              struct hpsb_iso_packet_info *info)
1600 {
1601         /* release the memory where the packet was */
1602         recv->released_bytes += info->total_len;
1603
1604         /* have we released enough memory for one block? */
1605         while (recv->released_bytes > recv->buf_stride) {
1606                 ohci_iso_recv_release_block(recv, recv->block_reader);
1607                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1608                 recv->released_bytes -= recv->buf_stride;
1609         }
1610 }
1611
1612 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1613 {
1614         struct ohci_iso_recv *recv = iso->hostdata;
1615         if (recv->dma_mode == BUFFER_FILL_MODE) {
1616                 ohci_iso_recv_bufferfill_release(recv, info);
1617         } else {
1618                 ohci_iso_recv_release_block(recv, info - iso->infos);
1619         }
1620 }
1621
/* Parse all packets from blocks that have been fully received.
 *
 * Walks the buffer-fill DMA buffer starting at recv->dma_offset,
 * extracting one packet per iteration (4-byte header, padded payload,
 * 4-byte timestamp trailer - OHCI spec section 10.6.1.1) and handing
 * each one to hpsb_iso_packet_received().  Parsing stops when it catches
 * up with the block the controller is currently filling. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;		/* set once at least one packet was delivered */
	int runaway = 0;	/* bail-out counter for corrupt buffers */
	struct ti_ohci *ohci = recv->ohci;	/* presumably referenced by PRINT() - verify macro */

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* NOTE(review): a bogus length is only reported, not skipped -
		   parsing continues and no resynchronization is attempted */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			/* assumes a guard page of at least buf_stride bytes
			   follows the ring - allocated elsewhere; TODO confirm */
			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1729
1730 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1731 {
1732         int loop;
1733         struct ti_ohci *ohci = recv->ohci;
1734
1735         /* loop over all blocks */
1736         for (loop = 0; loop < recv->nblocks; loop++) {
1737
1738                 /* check block_dma to see if it's done */
1739                 struct dma_cmd *im = &recv->block[recv->block_dma];
1740
1741                 /* check the DMA descriptor for new writes to xferStatus */
1742                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1743
1744                 /* rescount is the number of bytes *remaining to be written* in the block */
1745                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1746
1747                 unsigned char event = xferstatus & 0x1F;
1748
1749                 if (!event) {
1750                         /* nothing has happened to this block yet */
1751                         break;
1752                 }
1753
1754                 if (event != 0x11) {
1755                         atomic_inc(&iso->overflows);
1756                         PRINT(KERN_ERR,
1757                               "IR DMA error - OHCI error code 0x%02x\n", event);
1758                 }
1759
1760                 if (rescount != 0) {
1761                         /* the card is still writing to this block;
1762                            we can't touch it until it's done */
1763                         break;
1764                 }
1765
1766                 /* OK, the block is finished... */
1767
1768                 /* sync our view of the block */
1769                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1770
1771                 /* reset the DMA descriptor */
1772                 im->status = recv->buf_stride;
1773
1774                 /* advance block_dma */
1775                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1776
1777                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1778                         atomic_inc(&iso->overflows);
1779                         DBGMSG("ISO reception overflow - "
1780                                "ran out of DMA blocks");
1781                 }
1782         }
1783
1784         /* parse any packets that have arrived */
1785         ohci_iso_recv_bufferfill_parse(iso, recv);
1786 }
1787
1788 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1789 {
1790         int count;
1791         int wake = 0;
1792         struct ti_ohci *ohci = recv->ohci;
1793
1794         /* loop over the entire buffer */
1795         for (count = 0; count < recv->nblocks; count++) {
1796                 u32 packet_len = 0;
1797
1798                 /* pointer to the DMA descriptor */
1799                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1800
1801                 /* check the DMA descriptor for new writes to xferStatus */
1802                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1803                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1804
1805                 unsigned char event = xferstatus & 0x1F;
1806
1807                 if (!event) {
1808                         /* this packet hasn't come in yet; we are done for now */
1809                         goto out;
1810                 }
1811
1812                 if (event == 0x11) {
1813                         /* packet received successfully! */
1814
1815                         /* rescount is the number of bytes *remaining* in the packet buffer,
1816                            after the packet was written */
1817                         packet_len = recv->buf_stride - rescount;
1818
1819                 } else if (event == 0x02) {
1820                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1821                 } else if (event) {
1822                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1823                 }
1824
1825                 /* sync our view of the buffer */
1826                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1827
1828                 /* record the per-packet info */
1829                 {
1830                         /* iso header is 8 bytes ahead of the data payload */
1831                         unsigned char *hdr;
1832
1833                         unsigned int offset;
1834                         unsigned short cycle;
1835                         unsigned char channel, tag, sy;
1836
1837                         offset = iso->pkt_dma * recv->buf_stride;
1838                         hdr = iso->data_buf.kvirt + offset;
1839
1840                         /* skip iso header */
1841                         offset += 8;
1842                         packet_len -= 8;
1843
1844                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1845                         channel = hdr[5] & 0x3F;
1846                         tag = hdr[5] >> 6;
1847                         sy = hdr[4] & 0xF;
1848
1849                         hpsb_iso_packet_received(iso, offset, packet_len,
1850                                         recv->buf_stride, cycle, channel, tag, sy);
1851                 }
1852
1853                 /* reset the DMA descriptor */
1854                 il->status = recv->buf_stride;
1855
1856                 wake = 1;
1857                 recv->block_dma = iso->pkt_dma;
1858         }
1859
1860 out:
1861         if (wake)
1862                 hpsb_iso_wake(iso);
1863 }
1864
1865 static void ohci_iso_recv_task(unsigned long data)
1866 {
1867         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1868         struct ohci_iso_recv *recv = iso->hostdata;
1869
1870         if (recv->dma_mode == BUFFER_FILL_MODE)
1871                 ohci_iso_recv_bufferfill_task(iso, recv);
1872         else
1873                 ohci_iso_recv_packetperbuf_task(iso, recv);
1874 }
1875
1876 /***********************************
1877  * rawiso ISO transmission         *
1878  ***********************************/
1879
/* Per-iso-context state for rawiso transmission. */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;		/* host controller owning this context */
	struct dma_prog_region prog;	/* DMA descriptor program memory */
	struct ohci1394_iso_tasklet task; /* completion bottom half */
	int task_active;		/* nonzero once the tasklet is registered */

	/* per-context OHCI register offsets, computed in ohci_iso_xmit_init */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1890
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* descriptor carrying the header */
	u8 iso_hdr[8];		/* immediate data: the 1394 iso packet header */
	u32 unused[2];		/* remainder of the immediate-data area, unused */
	struct dma_cmd output_last;	/* descriptor pointing at the payload */
};
1901
/* rawiso transmit path - forward declarations */
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
1906
1907 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1908 {
1909         struct ohci_iso_xmit *xmit;
1910         unsigned int prog_size;
1911         int ctx;
1912         int ret = -ENOMEM;
1913
1914         xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
1915         if (!xmit)
1916                 return -ENOMEM;
1917
1918         iso->hostdata = xmit;
1919         xmit->ohci = iso->host->hostdata;
1920         xmit->task_active = 0;
1921
1922         dma_prog_region_init(&xmit->prog);
1923
1924         prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1925
1926         if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1927                 goto err;
1928
1929         ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1930                                   ohci_iso_xmit_task, (unsigned long) iso);
1931
1932         if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1933                 ret = -EBUSY;
1934                 goto err;
1935         }
1936
1937         xmit->task_active = 1;
1938
1939         /* xmit context registers are spaced 16 bytes apart */
1940         ctx = xmit->task.context;
1941         xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1942         xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1943         xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1944
1945         return 0;
1946
1947 err:
1948         ohci_iso_xmit_shutdown(iso);
1949         return ret;
1950 }
1951
/* Stop isochronous transmission: mask the context's completion interrupt
 * and halt its DMA program. */
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* presumably referenced by PRINT() - verify macro */

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}
1968
/* Undo ohci_iso_xmit_init: stop the context and unregister the tasklet
 * (only if init got that far - task_active guards a partial init), then
 * free the DMA program and the driver state. */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
1983
/* Transmit tasklet bottom half: retire packets the controller has sent
 * (in submission order, starting at pkt_dma), report each one to the
 * rawiso core with its transmit cycle, and wake any blocked writer. */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* presumably referenced by PRINT() - verify macro */
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (low 13 bits of the timeStamp written by the
		 * controller into the status word) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out; the third
		 * argument flags an error for any non-ack_complete event */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2028
/* Queue one packet for isochronous transmission.
 *
 * Builds the descriptor pair at slot iso->first_packet - an
 * OUTPUT_MORE_IMMEDIATE carrying the 8-byte iso header, then an
 * OUTPUT_LAST pointing at the payload in iso->data_buf - and splices it
 * onto the end of the DMA chain by rewriting the previous descriptor's
 * branch address, finally waking the context.
 * Returns 0 on success, -EINVAL if the payload crosses a page boundary
 * (no OUTPUT_MORE support for split payloads). */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* presumably referenced by PRINT() - verify macro */

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval
	 * NOTE(review): assumes iso->irq_interval != 0 - presumably
	 * validated by the rawiso core; confirm against caller */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2124
/* Start the isochronous transmit context, optionally waiting for a
 * specific cycle (cycle != -1 enables the cycleMatch mechanism).
 * Returns 0 on success, -1 if the RUN bit did not stick. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* presumably referenced by PRINT() - verify macro */

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable (bit 31) + 15-bit match value in bits 16-30 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (RUN is bit 15 of ContextControl) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2173
/* Single entry point for rawiso control operations from the hpsb_iso
 * layer; dispatches each isoctl command to the transmit/receive helper
 * above.  'arg' is command-specific (packet info pointer, int array,
 * channel number, or channel mask).  Returns the helper's result, 0 for
 * void operations, or -EINVAL for unknown commands. */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{

	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* args are: tag_mask, sync, cycle (per ohci_iso_recv_start) -
		 * TODO confirm ordering against the rawiso caller */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the receive tasklet body synchronously */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2226
2227 /***************************************
2228  * IEEE-1394 functionality section END *
2229  ***************************************/
2230
2231
2232 /********************************************************
2233  * Global stuff (interrupt handler, init/shutdown code) *
2234  ********************************************************/
2235
/* Reset an async transmit DMA context and abort everything queued on it.
 * Pending packets are moved off the context's lists under the lock, then
 * their subsystem completion callbacks run with ACKX_ABORTED after the
 * lock is dropped (callbacks may sleep or re-enter the driver). */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* return the context's bookkeeping to its post-init state */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2274
2275 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2276                                        quadlet_t rx_event,
2277                                        quadlet_t tx_event)
2278 {
2279         struct ohci1394_iso_tasklet *t;
2280         unsigned long mask;
2281         unsigned long flags;
2282
2283         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2284
2285         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2286                 mask = 1 << t->context;
2287
2288                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2289                         tasklet_schedule(&t->tasklet);
2290                 else if (rx_event & mask)
2291                         tasklet_schedule(&t->tasklet);
2292         }
2293
2294         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2295 }
2296
/* Top-level hardware interrupt handler.  Reads IntEventClear to learn
 * which events are pending, acknowledges everything except busReset
 * (that bit is cleared later, in the selfIDComplete path), then handles
 * each event bit in turn, clearing it from the local 'event' copy as it
 * is consumed so unhandled bits can be reported at the end. */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	/* Nothing pending: not our interrupt (shared IRQ line). */
	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* 0x800 in a ContextControl register is the "dead" flag;
		 * report every DMA context that has halted. */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		/* Iso transmit context registers have a 16-byte stride... */
		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		/* ...iso receive context registers a 32-byte stride. */
		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
		event &= ~OHCI1394_postedWriteErr;
	}
	if (event & OHCI1394_cycleTooLong) {
		if(printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* Re-assert cycleMaster so cycle start packets keep being
		 * generated after this event. */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				/* Drop the lock while busy-waiting so the
				 * event_lock is not held across the delay. */
				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		/* Dead context (0x800): stop it; otherwise run the transmit
		 * bottom half directly instead of scheduling it. */
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		/* Ack the per-context iso receive events and dispatch them
		 * to the registered iso tasklets. */
		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* Bit 31 of NodeID is the idValid flag. */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			      "(phyid %d, %s)", phyid,
			      (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt.  */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for.  */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2556
2557 /* Put the buffer back into the dma context */
2558 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2559 {
2560         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2561         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2562
2563         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2564         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2565         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2566         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2567
2568         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2569          * context program descriptors before it sees the wakeup bit set. */
2570         wmb();
2571         
2572         /* wake up the dma context if necessary */
2573         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2574                 PRINT(KERN_INFO,
2575                       "Waking dma ctx=%d ... processing is probably too slow",
2576                       d->ctx);
2577         }
2578
2579         /* do this always, to avoid race condition */
2580         reg_write(ohci, d->ctrlSet, 0x1000);
2581 }
2582
/* Conditionally byte-swap a little-endian quadlet read from a DMA buffer:
 * pass it through unchanged when 'noswap' is set (hardware already
 * delivered host order), otherwise convert with le32_to_cpu().
 * Arguments are parenthesized to avoid precedence surprises when callers
 * pass compound expressions. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2585
/* Fixed header+trailer length in bytes for each IEEE 1394 transaction
 * code (tcode), indexed by tcode value:
 *   0  - block packet, actual length must be read from the header
 *        (see packet_length()),
 *  -1  - tcode not valid for an async receive context. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                            -1, 0, -1, 0, -1, -1, 16, -1};
2588
2589 /*
2590  * Determine the length of a packet in the buffer
2591  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2592  */
2593 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2594                                 quadlet_t *buf_ptr, int offset,
2595                                 unsigned char tcode, int noswap)
2596 {
2597         int length = -1;
2598
2599         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2600                 length = TCODE_SIZE[tcode];
2601                 if (length == 0) {
2602                         if (offset + 12 >= d->buf_size) {
2603                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2604                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2605                         } else {
2606                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2607                         }
2608                         length += 20;
2609                 }
2610         } else if (d->type == DMA_CTX_ISO) {
2611                 /* Assumption: buffer fill mode with header/trailer */
2612                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2613         }
2614
2615         if (length > 0 && length % 4)
2616                 length += 4 - (length % 4);
2617
2618         return length;
2619 }
2620
/* Tasklet that processes dma receive buffers.  Walks the ring of receive
 * buffers from the position saved in d->buf_ind/d->buf_offset, extracting
 * one packet per iteration (reassembling packets that span buffers into
 * d->spb) and handing each to the ieee1394 core. */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume where the previous invocation left off. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* Low 16 bits of the descriptor status are resCount: bytes the
	 * hardware has not yet filled in this buffer. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Reassemble the packet into d->spb, recycling each
			 * fully-consumed buffer back into the ring. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				header_le32_to_cpu(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(d->spb[0]>>10)&0x3f);

			/* The last quadlet is the xferStatus trailer; ack
			 * code 0x11 there is ack_complete. */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailer quadlet before handing
			 * the packet to the ieee1394 core. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Re-read the residual count for the (possibly advanced)
		 * current buffer and continue while more data is pending. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Save position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2759
/* Bottom half that processes sent packets.  Walks the in-flight FIFO in
 * submission order, reports each completed packet's ack (or a pseudo ack
 * derived from the OHCI evt_* error code) to the ieee1394 core, releases
 * its DMA mapping and descriptor slot, and finally queues any packets
 * that were waiting for a free descriptor program. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* The xferStatus word is in the last descriptor of the
		 * program: 'end' for packets with a payload, 'begin' for
		 * header-only or raw packets. */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* Bit 0x10 means the remote node acked; the low four bits
		 * then carry the IEEE 1394 ack code.  Otherwise the low
		 * five bits are an OHCI evt_* error code which is mapped
		 * to a pseudo ack for the ieee1394 core. */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		/* Report completion, then release this packet's DMA mapping
		 * and free its descriptor slot. */
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* Slots were freed above; push any packets that were waiting for
	 * a free descriptor program. */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2887
2888 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2889 {
2890         if (d->ctrlClear) {
2891                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2892
2893                 if (d->type == DMA_CTX_ISO) {
2894                         /* disable interrupts */
2895                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2896                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2897                 } else {
2898                         tasklet_kill(&d->task);
2899                 }
2900         }
2901 }
2902
2903
2904 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2905 {
2906         int i;
2907         struct ti_ohci *ohci = d->ohci;
2908
2909         if (ohci == NULL)
2910                 return;
2911
2912         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2913
2914         if (d->buf_cpu) {
2915                 for (i=0; i<d->num_desc; i++)
2916                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2917                                 pci_free_consistent(
2918                                         ohci->dev, d->buf_size,
2919                                         d->buf_cpu[i], d->buf_bus[i]);
2920                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2921                         }
2922                 kfree(d->buf_cpu);
2923                 kfree(d->buf_bus);
2924         }
2925         if (d->prg_cpu) {
2926                 for (i=0; i<d->num_desc; i++)
2927                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2928                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2929                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2930                         }
2931                 pci_pool_destroy(d->prg_pool);
2932                 OHCI_DMA_FREE("dma_rcv prg pool");
2933                 kfree(d->prg_cpu);
2934                 kfree(d->prg_bus);
2935         }
2936         kfree(d->spb);
2937
2938         /* Mark this context as freed. */
2939         d->ohci = NULL;
2940 }
2941
2942 static int
2943 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2944                   enum context_type type, int ctx, int num_desc,
2945                   int buf_size, int split_buf_size, int context_base)
2946 {
2947         int i, len;
2948         static int num_allocs;
2949         static char pool_name[20];
2950
2951         d->ohci = ohci;
2952         d->type = type;
2953         d->ctx = ctx;
2954
2955         d->num_desc = num_desc;
2956         d->buf_size = buf_size;
2957         d->split_buf_size = split_buf_size;
2958
2959         d->ctrlSet = 0;
2960         d->ctrlClear = 0;
2961         d->cmdPtr = 0;
2962
2963         d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2964         d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2965
2966         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968                 free_dma_rcv_ctx(d);
2969                 return -ENOMEM;
2970         }
2971
2972         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2973         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2974
2975         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2976                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2977                 free_dma_rcv_ctx(d);
2978                 return -ENOMEM;
2979         }
2980
2981         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2982
2983         if (d->spb == NULL) {
2984                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2985                 free_dma_rcv_ctx(d);
2986                 return -ENOMEM;
2987         }
2988         
2989         len = sprintf(pool_name, "ohci1394_rcv_prg");
2990         sprintf(pool_name+len, "%d", num_allocs);
2991         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2992                                 sizeof(struct dma_cmd), 4, 0);
2993         if(d->prg_pool == NULL)
2994         {
2995                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2996                 free_dma_rcv_ctx(d);
2997                 return -ENOMEM;
2998         }
2999         num_allocs++;
3000
3001         OHCI_DMA_ALLOC("dma_rcv prg pool");
3002
3003         for (i=0; i<d->num_desc; i++) {
3004                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3005                                                      d->buf_size,
3006                                                      d->buf_bus+i);
3007                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3008
3009                 if (d->buf_cpu[i] != NULL) {
3010                         memset(d->buf_cpu[i], 0, d->buf_size);
3011                 } else {
3012                         PRINT(KERN_ERR,
3013                               "Failed to allocate dma buffer");
3014                         free_dma_rcv_ctx(d);
3015                         return -ENOMEM;
3016                 }
3017
3018                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3019                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3020
3021                 if (d->prg_cpu[i] != NULL) {
3022                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3023                 } else {
3024                         PRINT(KERN_ERR,
3025                               "Failed to allocate dma prg");
3026                         free_dma_rcv_ctx(d);
3027                         return -ENOMEM;
3028                 }
3029         }
3030
3031         spin_lock_init(&d->lock);
3032
3033         if (type == DMA_CTX_ISO) {
3034                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3035                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3036                                           dma_rcv_tasklet, (unsigned long) d);
3037         } else {
3038                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3039                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3040                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3041
3042                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3043         }
3044
3045         return 0;
3046 }
3047
3048 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3049 {
3050         int i;
3051         struct ti_ohci *ohci = d->ohci;
3052
3053         if (ohci == NULL)
3054                 return;
3055
3056         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3057
3058         if (d->prg_cpu) {
3059                 for (i=0; i<d->num_desc; i++)
3060                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3061                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3062                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3063                         }
3064                 pci_pool_destroy(d->prg_pool);
3065                 OHCI_DMA_FREE("dma_trm prg pool");
3066                 kfree(d->prg_cpu);
3067                 kfree(d->prg_bus);
3068         }
3069
3070         /* Mark this context as freed. */
3071         d->ohci = NULL;
3072 }
3073
3074 static int
3075 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3076                   enum context_type type, int ctx, int num_desc,
3077                   int context_base)
3078 {
3079         int i, len;
3080         static char pool_name[20];
3081         static int num_allocs=0;
3082
3083         d->ohci = ohci;
3084         d->type = type;
3085         d->ctx = ctx;
3086         d->num_desc = num_desc;
3087         d->ctrlSet = 0;
3088         d->ctrlClear = 0;
3089         d->cmdPtr = 0;
3090
3091         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3092         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3093
3094         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3095                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3096                 free_dma_trm_ctx(d);
3097                 return -ENOMEM;
3098         }
3099
3100         len = sprintf(pool_name, "ohci1394_trm_prg");
3101         sprintf(pool_name+len, "%d", num_allocs);
3102         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3103                                 sizeof(struct at_dma_prg), 4, 0);
3104         if (d->prg_pool == NULL) {
3105                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3106                 free_dma_trm_ctx(d);
3107                 return -ENOMEM;
3108         }
3109         num_allocs++;
3110
3111         OHCI_DMA_ALLOC("dma_rcv prg pool");
3112
3113         for (i = 0; i < d->num_desc; i++) {
3114                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3115                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3116
3117                 if (d->prg_cpu[i] != NULL) {
3118                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3119                 } else {
3120                         PRINT(KERN_ERR,
3121                               "Failed to allocate at dma prg");
3122                         free_dma_trm_ctx(d);
3123                         return -ENOMEM;
3124                 }
3125         }
3126
3127         spin_lock_init(&d->lock);
3128
3129         /* initialize tasklet */
3130         if (type == DMA_CTX_ISO) {
3131                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3132                                           dma_trm_tasklet, (unsigned long) d);
3133                 if (ohci1394_register_iso_tasklet(ohci,
3134                                                   &ohci->it_legacy_tasklet) < 0) {
3135                         PRINT(KERN_ERR, "No IT DMA context available");
3136                         free_dma_trm_ctx(d);
3137                         return -EBUSY;
3138                 }
3139
3140                 /* IT can be assigned to any context by register_iso_tasklet */
3141                 d->ctx = ohci->it_legacy_tasklet.context;
3142                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3143                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3144                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3145         } else {
3146                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3147                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3148                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3149                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3150         }
3151
3152         return 0;
3153 }
3154
3155 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3156 {
3157         struct ti_ohci *ohci = host->hostdata;
3158
3159         reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3160         reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3161
3162         memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3163 }
3164
3165
3166 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3167                                  quadlet_t data, quadlet_t compare)
3168 {
3169         struct ti_ohci *ohci = host->hostdata;
3170         int i;
3171
3172         reg_write(ohci, OHCI1394_CSRData, data);
3173         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3174         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3175
3176         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3177                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3178                         break;
3179
3180                 mdelay(1);
3181         }
3182
3183         return reg_read(ohci, OHCI1394_CSRData);
3184 }
3185
3186 static struct hpsb_host_driver ohci1394_driver = {
3187         .owner =                THIS_MODULE,
3188         .name =                 OHCI1394_DRIVER_NAME,
3189         .set_hw_config_rom =    ohci_set_hw_config_rom,
3190         .transmit_packet =      ohci_transmit,
3191         .devctl =               ohci_devctl,
3192         .isoctl =               ohci_isoctl,
3193         .hw_csr_reg =           ohci_hw_csr_reg,
3194 };
3195
3196 /***********************************
3197  * PCI Driver Interface functions  *
3198  ***********************************/
3199
3200 #define FAIL(err, fmt, args...)                 \
3201 do {                                            \
3202         PRINT_G(KERN_ERR, fmt , ## args);       \
3203         ohci1394_pci_remove(dev);               \
3204         return err;                             \
3205 } while (0)
3206
3207 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3208                                         const struct pci_device_id *ent)
3209 {
3210         struct hpsb_host *host;
3211         struct ti_ohci *ohci;   /* shortcut to currently handled device */
3212         resource_size_t ohci_base;
3213
3214 #ifdef CONFIG_PPC_PMAC
3215         /* Necessary on some machines if ohci1394 was loaded/ unloaded before */
3216         if (machine_is(powermac)) {
3217                 struct device_node *ofn = pci_device_to_OF_node(dev);
3218
3219                 if (ofn) {
3220                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3221                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3222                 }
3223         }
3224 #endif /* CONFIG_PPC_PMAC */
3225
3226         if (pci_enable_device(dev))
3227                 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3228         pci_set_master(dev);
3229
3230         host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3231         if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3232
3233         ohci = host->hostdata;
3234         ohci->dev = dev;
3235         ohci->host = host;
3236         ohci->init_state = OHCI_INIT_ALLOC_HOST;
3237         host->pdev = dev;
3238         pci_set_drvdata(dev, ohci);
3239
3240         /* We don't want hardware swapping */
3241         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3242
3243         /* Some oddball Apple controllers do not order the selfid
3244          * properly, so we make up for it here.  */
3245 #ifndef __LITTLE_ENDIAN
3246         /* XXX: Need a better way to check this. I'm wondering if we can
3247          * read the values of the OHCI1394_PCI_HCI_Control and the
3248          * noByteSwapData registers to see if they were not cleared to
3249          * zero. Should this work? Obviously it's not defined what these
3250          * registers will read when they aren't supported. Bleh! */
3251         if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3252             dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3253                 ohci->no_swap_incoming = 1;
3254                 ohci->selfid_swap = 0;
3255         } else
3256                 ohci->selfid_swap = 1;
3257 #endif
3258
3259
3260 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3261 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3262 #endif
3263
3264         /* These chipsets require a bit of extra care when checking after
3265          * a busreset.  */
3266         if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3267              dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3268             (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
3269              dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3270                 ohci->check_busreset = 1;
3271
3272         /* We hardwire the MMIO length, since some CardBus adaptors
3273          * fail to report the right length.  Anyway, the ohci spec
3274          * clearly says it's 2kb, so this shouldn't be a problem. */
3275         ohci_base = pci_resource_start(dev, 0);
3276         if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3277                 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3278                       (unsigned long long)pci_resource_len(dev, 0));
3279
3280         if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3281                                 OHCI1394_DRIVER_NAME))
3282                 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3283                         (unsigned long long)ohci_base,
3284                         (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3285         ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3286
3287         ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3288         if (ohci->registers == NULL)
3289                 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3290         ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3291         DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3292
3293         /* csr_config rom allocation */
3294         ohci->csr_config_rom_cpu =
3295                 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3296                                      &ohci->csr_config_rom_bus);
3297         OHCI_DMA_ALLOC("consistent csr_config_rom");
3298         if (ohci->csr_config_rom_cpu == NULL)
3299                 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3300         ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3301
3302         /* self-id dma buffer allocation */
3303         ohci->selfid_buf_cpu =
3304                 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3305                       &ohci->selfid_buf_bus);
3306         OHCI_DMA_ALLOC("consistent selfid_buf");
3307
3308         if (ohci->selfid_buf_cpu == NULL)
3309                 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3310         ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3311
3312         if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3313                 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3314                       "8Kb boundary... may cause problems on some CXD3222 chip",
3315                       ohci->selfid_buf_cpu);
3316
3317         /* No self-id errors at startup */
3318         ohci->self_id_errors = 0;
3319
3320         ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3321         /* AR DMA request context allocation */
3322         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3323                               DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3324                               AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3325                               OHCI1394_AsReqRcvContextBase) < 0)
3326                 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3327
3328         /* AR DMA response context allocation */
3329         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3330                               DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3331                               AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3332                               OHCI1394_AsRspRcvContextBase) < 0)
3333                 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3334
3335         /* AT DMA request context */
3336         if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3337                               DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3338                               OHCI1394_AsReqTrContextBase) < 0)
3339                 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3340
3341         /* AT DMA response context */
3342         if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3343                               DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3344                               OHCI1394_AsRspTrContextBase) < 0)
3345                 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3346
3347         /* Start off with a soft reset, to clear everything to a sane
3348          * state. */
3349         ohci_soft_reset(ohci);
3350
3351         /* Now enable LPS, which we need in order to start accessing
3352          * most of the registers.  In fact, on some cards (ALI M5251),
3353          * accessing registers in the SClk domain without LPS enabled
3354          * will lock up the machine.  Wait 50msec to make sure we have
3355          * full link enabled.  */
3356         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3357
3358         /* Disable and clear interrupts */
3359         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3360         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3361
3362         mdelay(50);
3363
3364         /* Determine the number of available IR and IT contexts. */
3365         ohci->nb_iso_rcv_ctx =
3366                 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3367         ohci->nb_iso_xmit_ctx =
3368                 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3369
3370         /* Set the usage bits for non-existent contexts so they can't
3371          * be allocated */
3372         ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3373         ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3374
3375         INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3376         spin_lock_init(&ohci->iso_tasklet_list_lock);
3377         ohci->ISO_channel_usage = 0;
3378         spin_lock_init(&ohci->IR_channel_lock);
3379
3380         /* Allocate the IR DMA context right here so we don't have
3381          * to do it in interrupt path - note that this doesn't
3382          * waste much memory and avoids the jugglery required to
3383          * allocate it in IRQ path. */
3384         if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3385                               DMA_CTX_ISO, 0, IR_NUM_DESC,
3386                               IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3387                               OHCI1394_IsoRcvContextBase) < 0) {
3388                 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3389         }
3390
3391         /* We hopefully don't have to pre-allocate IT DMA like we did
3392          * for IR DMA above. Allocate it on-demand and mark inactive. */
3393         ohci->it_legacy_context.ohci = NULL;
3394         spin_lock_init(&ohci->event_lock);
3395
3396         /*
3397          * interrupts are disabled, all right, but... due to IRQF_SHARED we
3398          * might get called anyway.  We'll see no event, of course, but
3399          * we need to get to that "no event", so enough should be initialized
3400          * by that point.
3401          */
3402         if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3403                          OHCI1394_DRIVER_NAME, ohci))
3404                 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3405
3406         ohci->init_state = OHCI_INIT_HAVE_IRQ;
3407         ohci_initialize(ohci);
3408
3409         /* Set certain csr values */
3410         host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3411         host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3412         host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
3413         host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3414         host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3415
3416         if (phys_dma) {
3417                 host->low_addr_space =
3418                         (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3419                 if (!host->low_addr_space)
3420                         host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3421         }
3422         host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3423
3424         /* Tell the highlevel this host is ready */
3425         if (hpsb_add_host(host))
3426                 FAIL(-ENOMEM, "Failed to register host with highlevel");
3427
3428         ohci->init_state = OHCI_INIT_DONE;
3429
3430         return 0;
3431 #undef FAIL
3432 }
3433
3434 static void ohci1394_pci_remove(struct pci_dev *pdev)
3435 {
3436         struct ti_ohci *ohci;
3437         struct device *dev;
3438
3439         ohci = pci_get_drvdata(pdev);
3440         if (!ohci)
3441                 return;
3442
3443         dev = get_device(&ohci->host->device);
3444
3445         switch (ohci->init_state) {
3446         case OHCI_INIT_DONE:
3447                 hpsb_remove_host(ohci->host);
3448
3449                 /* Clear out BUS Options */
3450                 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3451                 reg_write(ohci, OHCI1394_BusOptions,
3452                           (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3453                           0x00ff0000);
3454                 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3455
3456         case OHCI_INIT_HAVE_IRQ:
3457                 /* Clear interrupt registers */
3458                 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3459                 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3460                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3461                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3462                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3463                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3464
3465                 /* Disable IRM Contender */
3466                 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3467
3468                 /* Clear link control register */
3469                 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3470
3471                 /* Let all other nodes know to ignore us */
3472                 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3473
3474                 /* Soft reset before we start - this disables
3475                  * interrupts and clears linkEnable and LPS. */
3476                 ohci_soft_reset(ohci);
3477                 free_irq(ohci->dev->irq, ohci);
3478
3479         case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3480                 /* The ohci_soft_reset() stops all DMA contexts, so we
3481                  * dont need to do this.  */
3482                 free_dma_rcv_ctx(&ohci->ar_req_context);
3483                 free_dma_rcv_ctx(&ohci->ar_resp_context);
3484                 free_dma_trm_ctx(&ohci->at_req_context);
3485                 free_dma_trm_ctx(&ohci->at_resp_context);
3486                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3487                 free_dma_trm_ctx(&ohci->it_legacy_context);
3488
3489         case OHCI_INIT_HAVE_SELFID_BUFFER:
3490                 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3491                                     ohci->selfid_buf_cpu,
3492                                     ohci->selfid_buf_bus);
3493                 OHCI_DMA_FREE("consistent selfid_buf");
3494
3495         case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3496                 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3497                                     ohci->csr_config_rom_cpu,
3498                                     ohci->csr_config_rom_bus);
3499                 OHCI_DMA_FREE("consistent csr_config_rom");
3500
3501         case OHCI_INIT_HAVE_IOMAPPING:
3502                 iounmap(ohci->registers);
3503
3504         case OHCI_INIT_HAVE_MEM_REGION:
3505                 release_mem_region(pci_resource_start(ohci->dev, 0),
3506                                    OHCI1394_REGISTER_SIZE);
3507
3508 #ifdef CONFIG_PPC_PMAC
3509         /* On UniNorth, power down the cable and turn off the chip clock
3510          * to save power on laptops */
3511         if (machine_is(powermac)) {
3512                 struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3513
3514                 if (ofn) {
3515                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3516                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3517                 }
3518         }
3519 #endif /* CONFIG_PPC_PMAC */
3520
3521         case OHCI_INIT_ALLOC_HOST:
3522                 pci_set_drvdata(ohci->dev, NULL);
3523         }
3524
3525         if (dev)
3526                 put_device(dev);
3527 }
3528
3529 #ifdef CONFIG_PM
3530 static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3531 {
3532         int err;
3533         struct ti_ohci *ohci = pci_get_drvdata(pdev);
3534
3535         if (!ohci) {
3536                 printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
3537                        OHCI1394_DRIVER_NAME);
3538                 return -ENXIO;
3539         }
3540         DBGMSG("suspend called");
3541
3542         /* Clear the async DMA contexts and stop using the controller */
3543         hpsb_bus_reset(ohci->host);
3544
3545         /* See ohci1394_pci_remove() for comments on this sequence */
3546         reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3547         reg_write(ohci, OHCI1394_BusOptions,
3548                   (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3549                   0x00ff0000);
3550         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3551         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3552         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3553         reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3554         reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3555         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3556         set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3557         reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3558         ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3559         ohci_soft_reset(ohci);
3560
3561         err = pci_save_state(pdev);
3562         if (err) {
3563                 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3564                 return err;
3565         }
3566         err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3567         if (err)
3568                 DBGMSG("pci_set_power_state failed with %d", err);
3569
3570 /* PowerMac suspend code comes last */
3571 #ifdef CONFIG_PPC_PMAC
3572         if (machine_is(powermac)) {
3573                 struct device_node *ofn = pci_device_to_OF_node(pdev);
3574
3575                 if (ofn)
3576                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3577         }
3578 #endif /* CONFIG_PPC_PMAC */
3579
3580         return 0;
3581 }
3582
3583 static int ohci1394_pci_resume(struct pci_dev *pdev)
3584 {
3585         int err;
3586         struct ti_ohci *ohci = pci_get_drvdata(pdev);
3587
3588         if (!ohci) {
3589                 printk(KERN_ERR "%s: tried to resume nonexisting host\n",
3590                        OHCI1394_DRIVER_NAME);
3591                 return -ENXIO;
3592         }
3593         DBGMSG("resume called");
3594
3595 /* PowerMac resume code comes first */
3596 #ifdef CONFIG_PPC_PMAC
3597         if (machine_is(powermac)) {
3598                 struct device_node *ofn = pci_device_to_OF_node(pdev);
3599
3600                 if (ofn)
3601                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3602         }
3603 #endif /* CONFIG_PPC_PMAC */
3604
3605         pci_set_power_state(pdev, PCI_D0);
3606         pci_restore_state(pdev);
3607         err = pci_enable_device(pdev);
3608         if (err) {
3609                 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3610                 return err;
3611         }
3612
3613         /* See ohci1394_pci_probe() for comments on this sequence */
3614         ohci_soft_reset(ohci);
3615         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3616         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3617         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3618         mdelay(50);
3619         ohci_initialize(ohci);
3620
3621         hpsb_resume_host(ohci->host);
3622         return 0;
3623 }
3624 #endif /* CONFIG_PM */
3625
3626 static struct pci_device_id ohci1394_pci_tbl[] = {
3627         {
3628                 .class =        PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3629                 .class_mask =   PCI_ANY_ID,
3630                 .vendor =       PCI_ANY_ID,
3631                 .device =       PCI_ANY_ID,
3632                 .subvendor =    PCI_ANY_ID,
3633                 .subdevice =    PCI_ANY_ID,
3634         },
3635         { 0, },
3636 };
3637
3638 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3639
3640 static struct pci_driver ohci1394_pci_driver = {
3641         .name =         OHCI1394_DRIVER_NAME,
3642         .id_table =     ohci1394_pci_tbl,
3643         .probe =        ohci1394_pci_probe,
3644         .remove =       ohci1394_pci_remove,
3645 #ifdef CONFIG_PM
3646         .resume =       ohci1394_pci_resume,
3647         .suspend =      ohci1394_pci_suspend,
3648 #endif
3649 };
3650
3651 /***********************************
3652  * OHCI1394 Video Interface        *
3653  ***********************************/
3654
3655 /* essentially the only purpose of this code is to allow another
3656    module to hook into ohci's interrupt handler */
3657
3658 /* returns zero if successful, one if DMA context is locked up */
3659 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3660 {
3661         int i=0;
3662
3663         /* stop the channel program if it's still running */
3664         reg_write(ohci, reg, 0x8000);
3665
3666         /* Wait until it effectively stops */
3667         while (reg_read(ohci, reg) & 0x400) {
3668                 i++;
3669                 if (i>5000) {
3670                         PRINT(KERN_ERR,
3671                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3672                         return 1;
3673                 }
3674
3675                 mb();
3676                 udelay(10);
3677         }
3678         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3679         return 0;
3680 }
3681
3682 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3683                                void (*func)(unsigned long), unsigned long data)
3684 {
3685         tasklet_init(&tasklet->tasklet, func, data);
3686         tasklet->type = type;
3687         /* We init the tasklet->link field, so we can list_del() it
3688          * without worrying whether it was added to the list or not. */
3689         INIT_LIST_HEAD(&tasklet->link);
3690 }
3691
3692 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3693                                   struct ohci1394_iso_tasklet *tasklet)
3694 {
3695         unsigned long flags, *usage;
3696         int n, i, r = -EBUSY;
3697
3698         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3699                 n = ohci->nb_iso_xmit_ctx;
3700                 usage = &ohci->it_ctx_usage;
3701         }
3702         else {
3703                 n = ohci->nb_iso_rcv_ctx;
3704                 usage = &ohci->ir_ctx_usage;
3705
3706                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3707                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3708                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3709                                 return r;
3710                         }
3711                 }
3712         }
3713
3714         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3715
3716         for (i = 0; i < n; i++)
3717                 if (!test_and_set_bit(i, usage)) {
3718                         tasklet->context = i;
3719                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3720                         r = 0;
3721                         break;
3722                 }
3723
3724         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3725
3726         return r;
3727 }
3728
3729 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3730                                      struct ohci1394_iso_tasklet *tasklet)
3731 {
3732         unsigned long flags;
3733
3734         tasklet_kill(&tasklet->tasklet);
3735
3736         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3737
3738         if (tasklet->type == OHCI_ISO_TRANSMIT)
3739                 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3740         else {
3741                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3742
3743                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3744                         clear_bit(0, &ohci->ir_multichannel_used);
3745                 }
3746         }
3747
3748         list_del(&tasklet->link);
3749
3750         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3751 }
3752
3753 EXPORT_SYMBOL(ohci1394_stop_context);
3754 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3755 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3756 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3757
3758 /***********************************
3759  * General module initialization   *
3760  ***********************************/
3761
3762 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3763 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3764 MODULE_LICENSE("GPL");
3765
/* Module exit: unregister the PCI driver (undoes ohci1394_init). */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3770
/* Module init: register the PCI driver so probing runs for matching
 * OHCI-1394 controllers.  Returns 0 on success or a negative errno. */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3775
3776 /* Register before most other device drivers.
3777  * Useful for remote debugging via physical DMA, e.g. using firescope. */
3778 fs_initcall(ohci1394_init);
3779 module_exit(ohci1394_cleanup);