1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Bugfix of Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <asm/system.h>  
58 #include <asm/io.h>  
59 #include <asm/atomic.h>  
60 #include <asm/uaccess.h>  
61 #include <asm/string.h>  
62 #include <asm/byteorder.h>  
63 #include <linux/vmalloc.h>  
64 #include "iphase.h"               
65 #include "suni.h"                 
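/* swap the two bytes of a 16-bit value; used below on the length field of the
   AAL5 CPCS trailer read from adapter memory */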
66 #define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))  
67 struct suni_priv {
68         struct k_sonet_stats sonet_stats; /* link diagnostics */
69         unsigned char loop_mode;        /* loopback mode */
70         struct atm_dev *dev;            /* device back-pointer */
71         struct suni_priv *next;         /* next SUNI */
72 }; 
73 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
74
75 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
76 static void desc_dbg(IADEV *iadev);
77
78 static IADEV *ia_dev[8];
79 static struct atm_dev *_ia_dev[8];
80 static int iadev_count;
81 static void ia_led_timer(unsigned long arg);
82 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
83 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
84 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
85 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
86             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
87
88 module_param(IA_TX_BUF, int, 0);
89 module_param(IA_TX_BUF_SZ, int, 0);
90 module_param(IA_RX_BUF, int, 0);
91 module_param(IA_RX_BUF_SZ, int, 0);
92 module_param(IADebugFlag, uint, 0644);
93
94 MODULE_LICENSE("GPL");
95
96 #if BITS_PER_LONG != 32
97 #  error FIXME: this driver only works on 32-bit platforms
98 #endif
99
100 /**************************** IA_LIB **********************************/
101
102 static void ia_init_rtn_q (IARTN_Q *que) 
103 {
104    que->next = NULL; 
105    que->tail = NULL; 
106 }
107
108 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
109 {
110    data->next = NULL;
111    if (que->next == NULL) 
112       que->next = que->tail = data;
113    else {
114       data->next = que->next;
115       que->next = data;
116    } 
117    return;
118 }
119
120 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
121    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
122    if (!entry) return -1;
123    entry->data = data;
124    entry->next = NULL;
125    if (que->next == NULL) 
126       que->next = que->tail = entry;
127    else {
128       que->tail->next = entry;
129       que->tail = que->tail->next;
130    }      
131    return 1;
132 }
133
134 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
135    IARTN_Q *tmpdata;
136    if (que->next == NULL)
137       return NULL;
138    tmpdata = que->next;
139    if ( que->next == que->tail)  
140       que->next = que->tail = NULL;
141    else 
142       que->next = que->next->next;
143    return tmpdata;
144 }
145
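/*
 * Drain the transmit complete queue (TCQ): walk from the host's shadow write
 * pointer up to the chip's write pointer.  For each completed descriptor
 * clear its timestamp, drop the owning VC's outstanding-descriptor count and,
 * for VCs whose PCR is below the driver's rate limit, queue the table entry
 * on tx_return_q so that ia_tx_poll() can complete the skb.
 */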
146 static void ia_hack_tcq(IADEV *dev) {
147
148   u_short               desc1;
149   u_short               tcq_wr;
150   struct ia_vcc         *iavcc_r = NULL; 
151
152   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
153   while (dev->host_tcq_wr != tcq_wr) {
154      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
155      if (!desc1) ;
156      else if (!dev->desc_tbl[desc1 -1].timestamp) {
157         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
158         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
159      }                                 
160      else if (dev->desc_tbl[desc1 -1].timestamp) {
161         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
162            printk("IA: Fatal err in get_desc\n");
163            continue;
164         }
165         iavcc_r->vc_desc_cnt--;
166         dev->desc_tbl[desc1 -1].timestamp = 0;
167         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
168                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
169         if (iavcc_r->pcr < dev->rate_limit) {
170            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
171            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
172               printk("ia_hack_tcq: No memory available\n");
173         } 
174         dev->desc_tbl[desc1 -1].iavcc = NULL;
175         dev->desc_tbl[desc1 -1].txskb = NULL;
176      }
177      dev->host_tcq_wr += 2;
178      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
179         dev->host_tcq_wr = dev->ffL.tcq_st;
180   }
181 } /* ia_hack_tcq */
182
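/*
 * Return the next free transmit descriptor number (1-based) from the TCQ and
 * stamp it with the current jiffies; 0xFFFF means none is available.  Every
 * 50 jiffies (or whenever the TCQ looks empty) descriptors whose per-VC
 * timeout (ltimeout) has expired are reclaimed and pushed back onto the TCQ.
 */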
183 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
184   u_short               desc_num, i;
185   struct sk_buff        *skb;
186   struct ia_vcc         *iavcc_r = NULL; 
187   unsigned long delta;
188   static unsigned long timer = 0;
189   int ltimeout;
190
191   ia_hack_tcq (dev);
192   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){      
193      timer = jiffies; 
194      i=0;
195      while (i < dev->num_tx_desc) {
196         if (!dev->desc_tbl[i].timestamp) {
197            i++;
198            continue;
199         }
200         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
201         delta = jiffies - dev->desc_tbl[i].timestamp;
202         if (delta >= ltimeout) {
203            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
204            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
205               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
206            else 
207               dev->ffL.tcq_rd -= 2;
208            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
209            if (!(skb = dev->desc_tbl[i].txskb) || 
210                           !(iavcc_r = dev->desc_tbl[i].iavcc))
211               printk("Fatal err, desc table vcc or skb is NULL\n");
212            else 
213               iavcc_r->vc_desc_cnt--;
214            dev->desc_tbl[i].timestamp = 0;
215            dev->desc_tbl[i].iavcc = NULL;
216            dev->desc_tbl[i].txskb = NULL;
217         }
218         i++;
219      } /* while */
220   }
221   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
222      return 0xFFFF;
223     
224   /* Get the next available descriptor number from TCQ */
225   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
226
227   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
228      dev->ffL.tcq_rd += 2;
229      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
230      dev->ffL.tcq_rd = dev->ffL.tcq_st;
231      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
232         return 0xFFFF; 
233      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
234   }
235
236   /* get system time */
237   dev->desc_tbl[desc_num -1].timestamp = jiffies;
238   return desc_num;
239 }
240
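/*
 * Work-around for a hardware anomaly: on every 5th call per ABR VC, check
 * whether the segmentation engine appears stuck on this VC (state and
 * scheduling position unchanged).  If so, briefly stop the scheduler, force
 * the VC back to the idle state, re-insert its VCI into the ABR schedule
 * table and restart transmission.
 */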
241 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
242   u_char                foundLockUp;
243   vcstatus_t            *vcstatus;
244   u_short               *shd_tbl;
245   u_short               tempCellSlot, tempFract;
246   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
247   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
248   u_int  i;
249
250   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
251      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
252      vcstatus->cnt++;
253      foundLockUp = 0;
254      if( vcstatus->cnt == 0x05 ) {
255         abr_vc += vcc->vci;
256         eabr_vc += vcc->vci;
257         if( eabr_vc->last_desc ) {
258            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
259               /* Wait for 10 Micro sec */
260               udelay(10);
261               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
262                  foundLockUp = 1;
263            }
264            else {
265               tempCellSlot = abr_vc->last_cell_slot;
266               tempFract    = abr_vc->fraction;
267               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
268                          && (tempFract == dev->testTable[vcc->vci]->fract))
269                  foundLockUp = 1;                   
270               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
271               dev->testTable[vcc->vci]->fract = tempFract; 
272            }        
273         } /* last descriptor */            
274         vcstatus->cnt = 0;      
275      } /* vcstatus->cnt */
276         
277      if (foundLockUp) {
278         IF_ABR(printk("LOCK UP found\n");) 
279         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
280         /* Wait for 10 Micro sec */
281         udelay(10); 
282         abr_vc->status &= 0xFFF8;
283         abr_vc->status |= 0x0001;  /* state is idle */
284         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
285         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
286         if (i < dev->num_vc)
287            shd_tbl[i] = vcc->vci;
288         else
289            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
290         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
291         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
292         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
293         vcstatus->cnt = 0;
294      } /* foundLockUp */
295
296   } /* if an ABR VC */
297
298
299 }
300  
301 /*
302 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
303 **
304 **  +----+----+------------------+-------------------------------+
305 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
306 **  +----+----+------------------+-------------------------------+
307 ** 
308 **    R = reserved (written as 0)
309 **    NZ = 0 if 0 cells/sec; 1 otherwise
310 **
311 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
312 */
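/*
** Worked example (illustrative): 2048 cells/sec = 1.0 x 2^11, so NZ = 1,
** exponent = 11 and mantissa = 0, encoding to 0x5600; 1500 cells/sec =
** 1.011101110b x 2^10, so exponent = 10 and mantissa = 0x0ee, encoding
** to 0x54ee.
*/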
313 static u16
314 cellrate_to_float(u32 cr)
315 {
316
317 #define NZ              0x4000
318 #define M_BITS          9               /* Number of bits in mantissa */
319 #define E_BITS          5               /* Number of bits in exponent */
320 #define M_MASK          0x1ff           
321 #define E_MASK          0x1f
322   u16   flot;
323   u32   tmp = cr & 0x00ffffff;
324   int   i   = 0;
325   if (cr == 0)
326      return 0;
327   while (tmp != 1) {
328      tmp >>= 1;
329      i++;
330   }
331   if (i == M_BITS)
332      flot = NZ | (i << M_BITS) | (cr & M_MASK);
333   else if (i < M_BITS)
334      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
335   else
336      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
337   return flot;
338 }
339
340 #if 0
341 /*
342 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
343 */
344 static u32
345 float_to_cellrate(u16 rate)
346 {
347   u32   exp, mantissa, cps;
348   if ((rate & NZ) == 0)
349      return 0;
350   exp = (rate >> M_BITS) & E_MASK;
351   mantissa = rate & M_MASK;
352   if (exp == 0)
353      return 1;
354   cps = (1 << M_BITS) | mantissa;
355   if (exp == M_BITS)
356      cps = cps;
357   else if (exp > M_BITS)
358      cps <<= (exp - M_BITS);
359   else
360      cps >>= (M_BITS - exp);
361   return cps;
362 }
363 #endif 
364
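/*
 * Default ABR service parameters (ATM Forum TM 4.0 names: PCR, MCR, ICR, TBE,
 * FRTT, RIF, RDF, Nrm, Trm, CDF, ADTF); the PCR defaults to the adapter's
 * line rate.
 */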
365 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
366   srv_p->class_type = ATM_ABR;
367   srv_p->pcr        = dev->LineRate;
368   srv_p->mcr        = 0;
369   srv_p->icr        = 0x055cb7;
370   srv_p->tbe        = 0xffffff;
371   srv_p->frtt       = 0x3a;
372   srv_p->rif        = 0xf;
373   srv_p->rdf        = 0xb;
374   srv_p->nrm        = 0x4;
375   srv_p->trm        = 0x7;
376   srv_p->cdf        = 0x3;
377   srv_p->adtf       = 50;
378 }
379
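/*
 * Program an ABR VC into the adapter: flag == 1 initializes the entry in the
 * segmentation-side (transmit) VC table, flag == 0 the reassembly-side
 * (receive) tables.  Rate parameters from srv_p are converted to the chip's
 * 16-bit floating point format.
 */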
380 static int
381 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
382                                                 struct atm_vcc *vcc, u8 flag)
383 {
384   f_vc_abr_entry  *f_abr_vc;
385   r_vc_abr_entry  *r_abr_vc;
386   u32           icr;
387   u8            trm, nrm, crm;
388   u16           adtf, air, *ptr16;      
389   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
390   f_abr_vc += vcc->vci;       
391   switch (flag) {
392      case 1: /* FFRED initialization */
393 #if 0  /* sanity check */
394        if (srv_p->pcr == 0)
395           return INVALID_PCR;
396        if (srv_p->pcr > dev->LineRate)
397           srv_p->pcr = dev->LineRate;
398        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
399           return MCR_UNAVAILABLE;
400        if (srv_p->mcr > srv_p->pcr)
401           return INVALID_MCR;
402        if (!(srv_p->icr))
403           srv_p->icr = srv_p->pcr;
404        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
405           return INVALID_ICR;
406        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
407           return INVALID_TBE;
408        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
409           return INVALID_FRTT;
410        if (srv_p->nrm > MAX_NRM)
411           return INVALID_NRM;
412        if (srv_p->trm > MAX_TRM)
413           return INVALID_TRM;
414        if (srv_p->adtf > MAX_ADTF)
415           return INVALID_ADTF;
416        else if (srv_p->adtf == 0)
417           srv_p->adtf = 1;
418        if (srv_p->cdf > MAX_CDF)
419           return INVALID_CDF;
420        if (srv_p->rif > MAX_RIF)
421           return INVALID_RIF;
422        if (srv_p->rdf > MAX_RDF)
423           return INVALID_RDF;
424 #endif
425        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
426        f_abr_vc->f_vc_type = ABR;
427        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
428                                   /* i.e 2**n = 2 << (n-1) */
429        f_abr_vc->f_nrm = nrm << 8 | nrm;
430        trm = 100000/(2 << (16 - srv_p->trm));
431        if ( trm == 0) trm = 1;
432        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
433        crm = srv_p->tbe / nrm;
434        if (crm == 0) crm = 1;
435        f_abr_vc->f_crm = crm & 0xff;
436        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
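       /* cap the initial cell rate so that at most TBE cells are sent within
          one fixed round-trip time; FRTT is apparently in microseconds here,
          hence the scaling by 10^6 to get cells/sec */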
437        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
438                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
439                                 (1000000/(srv_p->frtt/srv_p->tbe)));
440        f_abr_vc->f_icr = cellrate_to_float(icr);
441        adtf = (10000 * srv_p->adtf)/8192;
442        if (adtf == 0) adtf = 1; 
443        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
444        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
445        f_abr_vc->f_acr = f_abr_vc->f_icr;
446        f_abr_vc->f_status = 0x0042;
447        break;
448     case 0: /* RFRED initialization */  
449        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
450        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
451        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
452        r_abr_vc += vcc->vci;
453        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
454        air = srv_p->pcr << (15 - srv_p->rif);
455        if (air == 0) air = 1;
456        r_abr_vc->r_air = cellrate_to_float(air);
457        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
458        dev->sum_mcr        += srv_p->mcr;
459        dev->n_abr++;
460        break;
461     default:
462        break;
463   }
464   return        0;
465 }
466 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
467    u32 rateLow=0, rateHigh, rate;
468    int entries;
469    struct ia_vcc *ia_vcc;
470
471    int   idealSlot =0, testSlot, toBeAssigned, inc;
472    u32   spacing;
473    u16  *SchedTbl, *TstSchedTbl;
474    u16  cbrVC, vcIndex;
475    u32   fracSlot    = 0;
476    u32   sp_mod      = 0;
477    u32   sp_mod2     = 0;
478
479    /* IpAdjustTrafficParams */
480    if (vcc->qos.txtp.max_pcr <= 0) {
481       IF_ERR(printk("PCR for CBR not defined\n");)
482       return -1;
483    }
484    rate = vcc->qos.txtp.max_pcr;
485    entries = rate / dev->Granularity;
486    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
487                                 entries, rate, dev->Granularity);)
488    if (entries < 1)
489       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
490    rateLow  =  entries * dev->Granularity;
491    rateHigh = (entries + 1) * dev->Granularity;
492    if (3*(rate - rateLow) > (rateHigh - rate))
493       entries++;
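   /* i.e. round up to the next granularity step unless the requested rate is
      within the bottom quarter of the interval [rateLow, rateHigh) */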
494    if (entries > dev->CbrRemEntries) {
495       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
496       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
497                                        entries, dev->CbrRemEntries);)
498       return -EBUSY;
499    }   
500
501    ia_vcc = INPH_IA_VCC(vcc);
502    ia_vcc->NumCbrEntry = entries; 
503    dev->sum_mcr += entries * dev->Granularity; 
504    /* IaFFrednInsertCbrSched */
505    // Starting at an arbitrary location, place the entries into the table
506    // as smoothly as possible
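   // For example (illustrative): with CbrTotEntries == 512 and entries == 8,
   // spacing == 64 and sp_mod == 0, so the VCI lands in every 64th slot;
   // a non-zero remainder is spread out via fracSlot/sp_mod2 below.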
507    cbrVC   = 0;
508    spacing = dev->CbrTotEntries / entries;
509    sp_mod  = dev->CbrTotEntries % entries; // get modulo
510    toBeAssigned = entries;
511    fracSlot = 0;
512    vcIndex  = vcc->vci;
513    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
514    while (toBeAssigned)
515    {
516       // If this is the first time, start the table loading for this connection
517       // as close to entryPoint as possible.
518       if (toBeAssigned == entries)
519       {
520          idealSlot = dev->CbrEntryPt;
521          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
522          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
523             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
524       } else {
525          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
526          // in the table that would be  smoothest
527          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
528          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
529       }
530       if (idealSlot >= (int)dev->CbrTotEntries) 
531          idealSlot -= dev->CbrTotEntries;  
532       // Continuously check around this ideal value until a null
533       // location is encountered.
534       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
535       inc = 0;
536       testSlot = idealSlot;
537       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
538       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
539                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
540       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
541       while (cbrVC)  // If another VC at this location, we have to keep looking
542       {
543           inc++;
544           testSlot = idealSlot - inc;
545           if (testSlot < 0) { // Wrap if necessary
546              testSlot += dev->CbrTotEntries;
547              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
548                                                        (u32)SchedTbl,testSlot);)
549           }
550           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
551           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
552           if (!cbrVC)
553              break;
554           testSlot = idealSlot + inc;
555           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
556              testSlot -= dev->CbrTotEntries;
557              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
558              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
559                                             testSlot, toBeAssigned);)
560           } 
561           // set table index and read in value
562           TstSchedTbl = (u16*)(SchedTbl + testSlot);
563           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
564                           (u32)TstSchedTbl,cbrVC,inc);) 
565           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
566        } /* while */
567        // Move this VCI number into this location of the CBR Sched table.
568        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(vcIndex));  /* write one u16 VCI entry */
569        dev->CbrRemEntries--;
570        toBeAssigned--;
571    } /* while */ 
572
573    /* IaFFrednCbrEnable */
574    dev->NumEnabledCBR++;
575    if (dev->NumEnabledCBR == 1) {
576        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
577        IF_CBR(printk("CBR is enabled\n");)
578    }
579    return 0;
580 }
581 static void ia_cbrVc_close (struct atm_vcc *vcc) {
582    IADEV *iadev;
583    u16 *SchedTbl, NullVci = 0;
584    u32 i, NumFound;
585
586    iadev = INPH_IA_DEV(vcc->dev);
587    iadev->NumEnabledCBR--;
588    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
589    if (iadev->NumEnabledCBR == 0) {
590       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
591       IF_CBR (printk("CBR support disabled\n");)
592    }
593    NumFound = 0;
594    for (i=0; i < iadev->CbrTotEntries; i++)
595    {
596       if (*SchedTbl == vcc->vci) {
597          iadev->CbrRemEntries++;
598          *SchedTbl = NullVci;
599          IF_CBR(NumFound++;)
600       }
601       SchedTbl++;   
602    } 
603    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
604 }
605
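/*
 * Number of transmit descriptors currently available, derived from the
 * distance between the chip's TCQ read pointer and the host's shadow write
 * pointer (each TCQ slot is 2 bytes wide).
 */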
606 static int ia_avail_descs(IADEV *iadev) {
607    int tmp = 0;
608    ia_hack_tcq(iadev);
609    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
610       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
611    else
612       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
613                    iadev->ffL.tcq_st) / 2;
614    return tmp;
615 }    
616
617 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
618
619 static int ia_que_tx (IADEV *iadev) { 
620    struct sk_buff *skb;
621    int num_desc;
622    struct atm_vcc *vcc;
623    struct ia_vcc *iavcc;
624    num_desc = ia_avail_descs(iadev);
625
626    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
627       if (!(vcc = ATM_SKB(skb)->vcc)) {
628          dev_kfree_skb_any(skb);
629          printk("ia_que_tx: Null vcc\n");
630          break;
631       }
632       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
633          dev_kfree_skb_any(skb);
634          printk("Free the SKB on closed vci %d \n", vcc->vci);
635          break;
636       }
637       iavcc = INPH_IA_VCC(vcc);
638       if (ia_pkt_tx (vcc, skb)) {
639          skb_queue_head(&iadev->tx_backlog, skb);
640       }
641       num_desc--;
642    }
643    return 0;
644 }
645
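/*
 * Transmit completion: for every entry returned on tx_return_q, pop the skbs
 * queued on the VC up to and including the completed one, hand them back via
 * vcc->pop() (or free them), then try to drain the tx backlog.
 */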
646 static void ia_tx_poll (IADEV *iadev) {
647    struct atm_vcc *vcc = NULL;
648    struct sk_buff *skb = NULL, *skb1 = NULL;
649    struct ia_vcc *iavcc;
650    IARTN_Q *  rtne;
651
652    ia_hack_tcq(iadev);
653    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
654        skb = rtne->data.txskb;
655        if (!skb) {
656            printk("ia_tx_poll: skb is null\n");
657            goto out;
658        }
659        vcc = ATM_SKB(skb)->vcc;
660        if (!vcc) {
661            printk("ia_tx_poll: vcc is null\n");
662            dev_kfree_skb_any(skb);
663            goto out;
664        }
665
666        iavcc = INPH_IA_VCC(vcc);
667        if (!iavcc) {
668            printk("ia_tx_poll: iavcc is null\n");
669            dev_kfree_skb_any(skb);
670            goto out;
671        }
672
673        skb1 = skb_dequeue(&iavcc->txing_skb);
674        while (skb1 && (skb1 != skb)) {
675           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
676              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
677           }
678           IF_ERR(printk("Releasing an skb that does not match\n");)
679           if ((vcc->pop) && (skb1->len != 0))
680           {
681              vcc->pop(vcc, skb1);
682              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
683                                                           (long)skb1);)
684           }
685           else 
686              dev_kfree_skb_any(skb1);
687           skb1 = skb_dequeue(&iavcc->txing_skb);
688        }                                                        
689        if (!skb1) {
690           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
691           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
692           break;
693        }
694        if ((vcc->pop) && (skb->len != 0))
695        {
696           vcc->pop(vcc, skb);
697           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
698        }
699        else 
700           dev_kfree_skb_any(skb);
701        kfree(rtne);
702     }
703     ia_que_tx(iadev);
704 out:
705     return;
706 }
707 #if 0
708 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
709 {
710         u32     t;
711         int     i;
712         /*
713          * Issue a command to enable writes to the NOVRAM
714          */
715         NVRAM_CMD (EXTEND + EWEN);
716         NVRAM_CLR_CE;
717         /*
718          * issue the write command
719          */
720         NVRAM_CMD(IAWRITE + addr);
721         /* 
722          * Send the data, starting with D15, then D14, and so on for 16 bits
723          */
724         for (i=15; i>=0; i--) {
725                 NVRAM_CLKOUT (val & 0x8000);
726                 val <<= 1;
727         }
728         NVRAM_CLR_CE;
729         CFG_OR(NVCE);
730         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
731         while (!(t & NVDO))
732                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
733
734         NVRAM_CLR_CE;
735         /*
736          * disable writes again
737          */
738         NVRAM_CMD(EXTEND + EWDS);
739         NVRAM_CLR_CE;
740         CFG_AND(~NVDI);
741 }
742 #endif
743
744 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
745 {
746         u_short val;
747         u32     t;
748         int     i;
749         /*
750          * Read the first bit that was clocked with the falling edge of the
751          * last command data clock
752          */
753         NVRAM_CMD(IAREAD + addr);
754         /*
755          * Now read the rest of the bits, the next bit read is D14, then D13,
756          * and so on.
757          */
758         val = 0;
759         for (i=15; i>=0; i--) {
760                 NVRAM_CLKIN(t);
761                 val |= (t << i);
762         }
763         NVRAM_CLR_CE;
764         CFG_AND(~NVDI);
765         return val;
766 }
767
768 static void ia_hw_type(IADEV *iadev) {
769    u_short memType = ia_eeprom_get(iadev, 25);   
770    iadev->memType = memType;
771    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
772       iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       iadev->num_rx_desc = IA_RX_BUF;
775       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
776    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
777       if (IA_TX_BUF == DFL_TX_BUFFERS)
778         iadev->num_tx_desc = IA_TX_BUF / 2;
779       else 
780         iadev->num_tx_desc = IA_TX_BUF;
781       iadev->tx_buf_sz = IA_TX_BUF_SZ;
782       if (IA_RX_BUF == DFL_RX_BUFFERS)
783         iadev->num_rx_desc = IA_RX_BUF / 2;
784       else
785         iadev->num_rx_desc = IA_RX_BUF;
786       iadev->rx_buf_sz = IA_RX_BUF_SZ;
787    }
788    else {
789       if (IA_TX_BUF == DFL_TX_BUFFERS) 
790         iadev->num_tx_desc = IA_TX_BUF / 8;
791       else
792         iadev->num_tx_desc = IA_TX_BUF;
793       iadev->tx_buf_sz = IA_TX_BUF_SZ;
794       if (IA_RX_BUF == DFL_RX_BUFFERS)
795         iadev->num_rx_desc = IA_RX_BUF / 8;
796       else
797         iadev->num_rx_desc = IA_RX_BUF;
798       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
799    } 
800    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
801    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
802          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
803          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
804
805 #if 0
806    if ((memType & FE_MASK) == FE_SINGLE_MODE)
807       iadev->phy_type = PHY_OC3C_S;
808    else if ((memType & FE_MASK) == FE_UTP_OPTION)
809       iadev->phy_type = PHY_UTP155;
810    else
811       iadev->phy_type = PHY_OC3C_M;
812 #endif
813    
814    iadev->phy_type = memType & FE_MASK;
815    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
816                                          memType,iadev->phy_type);)
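   /* line rate in cells/sec: (payload bit rate / 8) bytes/sec divided by the
      53-byte cell size; the 26/27 factor apparently accounts for
      physical-layer framing overhead */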
817    if (iadev->phy_type == FE_25MBIT_PHY) 
818       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
819    else if (iadev->phy_type == FE_DS3_PHY)
820       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
821    else if (iadev->phy_type == FE_E3_PHY) 
822       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
823    else
824        iadev->LineRate = (u32)(ATM_OC3_PCR);
825    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
826
827 }
828
829 static void IaFrontEndIntr(IADEV *iadev) {
830   volatile IA_SUNI *suni;
831   volatile ia_mb25_t *mb25;
832   volatile suni_pm7345_t *suni_pm7345;
833   u32 intr_status;
834   u_int frmr_intr;
835
836   if(iadev->phy_type & FE_25MBIT_PHY) {
837      mb25 = (ia_mb25_t*)iadev->phy;
838      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
839   } else if (iadev->phy_type & FE_DS3_PHY) {
840      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
841      /* clear FRMR interrupts */
842      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
843      iadev->carrier_detect =  
844            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
845   } else if (iadev->phy_type & FE_E3_PHY ) {
846      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
847      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
848      iadev->carrier_detect =
849            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
850   }
851   else { 
852      suni = (IA_SUNI *)iadev->phy;
853      intr_status = suni->suni_rsop_status & 0xff;
854      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
855   }
856   if (iadev->carrier_detect)
857     printk("IA: SUNI carrier detected\n");
858   else
859     printk("IA: SUNI carrier lost signal\n"); 
860   return;
861 }
862
863 static void ia_mb25_init (IADEV *iadev)
864 {
865    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
866 #if 0
867    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
868 #endif
869    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
870    mb25->mb25_diag_control = 0;
871    /*
872     * Initialize carrier detect state
873     */
874    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
875    return;
876 }                   
877
878 static void ia_suni_pm7345_init (IADEV *iadev)
879 {
880    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
881    if (iadev->phy_type & FE_DS3_PHY)
882    {
883       iadev->carrier_detect = 
884           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
885       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
886       suni_pm7345->suni_ds3_frm_cfg = 1;
887       suni_pm7345->suni_ds3_tran_cfg = 1;
888       suni_pm7345->suni_config = 0;
889       suni_pm7345->suni_splr_cfg = 0;
890       suni_pm7345->suni_splt_cfg = 0;
891    }
892    else 
893    {
894       iadev->carrier_detect = 
895           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
896       suni_pm7345->suni_e3_frm_fram_options = 0x4;
897       suni_pm7345->suni_e3_frm_maint_options = 0x20;
898       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
899       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
900       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
901       suni_pm7345->suni_e3_tran_fram_options = 0x1;
902       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
903       suni_pm7345->suni_splr_cfg = 0x41;
904       suni_pm7345->suni_splt_cfg = 0x41;
905    } 
906    /*
907     * Enable RSOP loss of signal interrupt.
908     */
909    suni_pm7345->suni_intr_enbl = 0x28;
910  
911    /*
912     * Clear error counters
913     */
914    suni_pm7345->suni_id_reset = 0;
915
916    /*
917     * Clear "PMCTST" in master test register.
918     */
919    suni_pm7345->suni_master_test = 0;
920
921    suni_pm7345->suni_rxcp_ctrl = 0x2c;
922    suni_pm7345->suni_rxcp_fctrl = 0x81;
923  
924    suni_pm7345->suni_rxcp_idle_pat_h1 =
925         suni_pm7345->suni_rxcp_idle_pat_h2 =
926         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
927    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
928  
929    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
930    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
933  
934    suni_pm7345->suni_rxcp_cell_pat_h1 =
935         suni_pm7345->suni_rxcp_cell_pat_h2 =
936         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
937    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
938  
939    suni_pm7345->suni_rxcp_cell_mask_h1 =
940         suni_pm7345->suni_rxcp_cell_mask_h2 =
941         suni_pm7345->suni_rxcp_cell_mask_h3 =
942         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
943  
944    suni_pm7345->suni_txcp_ctrl = 0xa4;
945    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
946    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
947  
948    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
949                                  SUNI_PM7345_CLB |
950                                  SUNI_PM7345_DLB |
951                                   SUNI_PM7345_PLB);
952 #ifdef __SNMP__
953    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
954 #endif /* __SNMP__ */
955    return;
956 }
957
958
959 /***************************** IA_LIB END *****************************/
960     
961 #ifdef CONFIG_ATM_IA_DEBUG
962 static int tcnter = 0;
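/*
 * Debug helper: hex + ASCII dump of 'length' bytes at 'cp', 16 bytes per
 * line, each output line prefixed with 'prefix'.
 */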
963 static void xdump( u_char*  cp, int  length, char*  prefix )
964 {
965     int col, count;
966     u_char prntBuf[120];
967     u_char*  pBuf = prntBuf;
968     count = 0;
969     while(count < length){
970         pBuf += sprintf( pBuf, "%s", prefix );
971         for(col = 0;count + col < length && col < 16; col++){
972             if (col != 0 && (col % 4) == 0)
973                 pBuf += sprintf( pBuf, " " );
974             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
975         }
976         while(col++ < 16){      /* pad end of buffer with blanks */
977             if ((col % 4) == 0)
978                 sprintf( pBuf, " " );
979             pBuf += sprintf( pBuf, "   " );
980         }
981         pBuf += sprintf( pBuf, "  " );
982         for(col = 0;count + col < length && col < 16; col++){
983             if (isprint((int)cp[count + col]))
984                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
985             else
986                 pBuf += sprintf( pBuf, "." );
987                 }
988         sprintf( pBuf, "\n" );
989         // SPrint(prntBuf);
990         printk("%s", prntBuf);
991         count += col;
992         pBuf = prntBuf;
993     }
994
995 }  /* close xdump(... */
996 #endif /* CONFIG_ATM_IA_DEBUG */
997
998   
999 static struct atm_dev *ia_boards = NULL;  
1000   
1001 #define ACTUAL_RAM_BASE \
1002         RAM_BASE*((iadev->mem)/(128 * 1024))  
1003 #define ACTUAL_SEG_RAM_BASE \
1004         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1005 #define ACTUAL_REASS_RAM_BASE \
1006         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1007   
1008   
1009 /*-- some utilities and memory allocation stuff will come here -------------*/  
1010   
1011 static void desc_dbg(IADEV *iadev) {
1012
1013   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1014   u32 i;
1015   void __iomem *tmp;
1016   // regval = readl((u32)ia_cmds->maddr);
1017   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1018   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1019                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1020                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1021   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1022                    iadev->ffL.tcq_rd);
1023   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1024   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1025   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1026   i = 0;
1027   while (tcq_st_ptr != tcq_ed_ptr) {
1028       tmp = iadev->seg_ram+tcq_st_ptr;
1029       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1030       tcq_st_ptr += 2;
1031   }
1032   for(i=0; i <iadev->num_tx_desc; i++)
1033       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1034 } /* desc_dbg */
1035   
1036   
1037 /*----------------------------- Receiving side stuff --------------------------*/  
1038  
1039 static void rx_excp_rcvd(struct atm_dev *dev)  
1040 {  
1041 #if 0 /* closing the receiving side will cause too many excp int */  
1042   IADEV *iadev;  
1043   u_short state;  
1044   u_short excpq_rd_ptr;  
1045   //u_short *ptr;  
1046   int vci, error = 1;  
1047   iadev = INPH_IA_DEV(dev);  
1048   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1049   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1050   { printk("state = %x \n", state); 
1051         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1052  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1053         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1054             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1055         // TODO: update exception stat
1056         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1057         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1058         // pwang_test
1059         excpq_rd_ptr += 4;  
1060         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1061             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1062         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1063         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1064   }  
1065 #endif
1066 }  
1067   
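/*
 * Return descriptor 'desc' to the receive free-descriptor queue and advance
 * the hardware free-queue write pointer, wrapping from fdq_ed back to fdq_st.
 */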
1068 static void free_desc(struct atm_dev *dev, int desc)  
1069 {  
1070         IADEV *iadev;  
1071         iadev = INPH_IA_DEV(dev);  
1072         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1073         iadev->rfL.fdq_wr +=2;
1074         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1075                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1076         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1077 }  
1078   
1079   
1080 static int rx_pkt(struct atm_dev *dev)  
1081 {  
1082         IADEV *iadev;  
1083         struct atm_vcc *vcc;  
1084         unsigned short status;  
1085         struct rx_buf_desc __iomem *buf_desc_ptr;  
1086         int desc;   
1087         struct dle* wr_ptr;  
1088         int len;  
1089         struct sk_buff *skb;  
1090         u_int buf_addr, dma_addr;  
1091
1092         iadev = INPH_IA_DEV(dev);  
1093         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1094         {  
1095             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1096             return -EINVAL;  
1097         }  
1098         /* mask off the top 3 bits to get the actual desc number */  
1099         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1100         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1101                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1102               printk(" pcq_wr_ptr = 0x%x\n",
1103                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1104         /* update the read pointer - maybe we should do this at the end */  
1105         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1106                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1107         else  
1108                 iadev->rfL.pcq_rd += 2;
1109         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1110   
1111         /* get the buffer desc entry.  
1112                 update stuff. - doesn't seem to be any update necessary  
1113         */  
1114         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1115         /* make the ptr point to the corresponding buffer desc entry */  
1116         buf_desc_ptr += desc;     
1117         if (!desc || (desc > iadev->num_rx_desc) || 
1118                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1119             free_desc(dev, desc);
1120             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1121             return -1;
1122         }
1123         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1124         if (!vcc)  
1125         {      
1126                 free_desc(dev, desc); 
1127                 printk("IA: null vcc, drop PDU\n");  
1128                 return -1;  
1129         }  
1130           
1131   
1132         /* might want to check the status bits for errors */  
1133         status = (u_short) (buf_desc_ptr->desc_mode);  
1134         if (status & (RX_CER | RX_PTE | RX_OFL))  
1135         {  
1136                 atomic_inc(&vcc->stats->rx_err);
1137                 IF_ERR(printk("IA: bad packet, dropping it");)  
1138                 if (status & RX_CER) { 
1139                     IF_ERR(printk(" cause: packet CRC error\n");)
1140                 }
1141                 else if (status & RX_PTE) {
1142                     IF_ERR(printk(" cause: packet time out\n");)
1143                 }
1144                 else {
1145                     IF_ERR(printk(" cause: buffer overflow\n");)
1146                 }
1147                 goto out_free_desc;
1148         }  
1149   
1150         /*  
1151                 build DLE.        
1152         */  
1153   
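        /* the chip apparently advances the DMA start address past the data it
           reassembled, so the received SDU length is the difference between
           the DMA pointer and the buffer start address */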
1154         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1155         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1156         len = dma_addr - buf_addr;  
1157         if (len > iadev->rx_buf_sz) {
1158            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1159            atomic_inc(&vcc->stats->rx_err);
1160            goto out_free_desc;
1161         }
1162                   
1163         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1164            if (vcc->vci < 32)
1165               printk("Drop control packets\n");
1166            goto out_free_desc;
1167         }
1168         skb_put(skb,len);  
1169         // pwang_test
1170         ATM_SKB(skb)->vcc = vcc;
1171         ATM_DESC(skb) = desc;        
1172         skb_queue_tail(&iadev->rx_dma_q, skb);  
1173
1174         /* Build the DLE structure */  
1175         wr_ptr = iadev->rx_dle_q.write;  
1176         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1177                 len, PCI_DMA_FROMDEVICE);
1178         wr_ptr->local_pkt_addr = buf_addr;  
1179         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1180         wr_ptr->mode = DMA_INT_ENABLE;  
1181   
1182         /* should take care of wrap around here too. */  
1183         if(++wr_ptr == iadev->rx_dle_q.end)
1184              wr_ptr = iadev->rx_dle_q.start;
1185         iadev->rx_dle_q.write = wr_ptr;  
1186         udelay(1);  
1187         /* Increment transaction counter */  
1188         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1189 out:    return 0;  
1190 out_free_desc:
1191         free_desc(dev, desc);
1192         goto out;
1193 }  
1194   
1195 static void rx_intr(struct atm_dev *dev)  
1196 {  
1197   IADEV *iadev;  
1198   u_short status;  
1199   u_short state, i;  
1200   
1201   iadev = INPH_IA_DEV(dev);  
1202   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1203   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1204   if (status & RX_PKT_RCVD)  
1205   {  
1206         /* do something */  
1207         /* Basically we received an interrupt for receiving a packet.  
1208         A descriptor would have been written to the packet complete   
1209         queue. Get all the descriptors and set up DMA to move the   
1210         packets until the packet complete queue is empty.  
1211         */  
1212         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1213         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1214         while(!(state & PCQ_EMPTY))  
1215         {  
1216              rx_pkt(dev);  
1217              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1218         }  
1219         iadev->rxing = 1;
1220   }  
1221   if (status & RX_FREEQ_EMPT)  
1222   {   
1223      if (iadev->rxing) {
1224         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1225         iadev->rx_tmp_jif = jiffies; 
1226         iadev->rxing = 0;
1227      } 
1228      else if (((jiffies - iadev->rx_tmp_jif) > 50) && 
1229                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1230         for (i = 1; i <= iadev->num_rx_desc; i++)
1231                free_desc(dev, i);
1232 printk("Test logic RUN!!!!\n");
1233         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1234         iadev->rxing = 1;
1235      }
1236      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1237   }  
1238
1239   if (status & RX_EXCP_RCVD)  
1240   {  
1241         /* probably need to handle the exception queue also. */  
1242         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1243         rx_excp_rcvd(dev);  
1244   }  
1245
1246
1247   if (status & RX_RAW_RCVD)  
1248   {  
1249         /* need to handle the raw incoming cells. This depends on   
1250         whether we have programmed the card to receive raw cells or not;  
1251         otherwise ignore them. */  
1252         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1253   }  
1254 }  
1255   
1256   
1257 static void rx_dle_intr(struct atm_dev *dev)  
1258 {  
1259   IADEV *iadev;  
1260   struct atm_vcc *vcc;   
1261   struct sk_buff *skb;  
1262   int desc;  
1263   u_short state;   
1264   struct dle *dle, *cur_dle;  
1265   u_int dle_lp;  
1266   int len;
1267   iadev = INPH_IA_DEV(dev);  
1268  
1269   /* free all the dles done, that is just update our own dle read pointer   
1270         - do we really need to do this? Probably not. */  
1271   /* DMA is done, just get all the receive buffers from the rx dma queue  
1272         and push them up to the higher layer protocol. Also free the desc  
1273         associated with the buffer. */  
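  /* the RX list address register points at the DLE the DMA engine will
     service next; masking with the ring size and dividing by the 16-byte DLE
     size gives the index of that entry */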
1274   dle = iadev->rx_dle_q.read;  
1275   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1276   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1277   while(dle != cur_dle)  
1278   {  
1279       /* free the DMAed skb */  
1280       skb = skb_dequeue(&iadev->rx_dma_q);  
1281       if (!skb)  
1282          goto INCR_DLE;
1283       desc = ATM_DESC(skb);
1284       free_desc(dev, desc);  
1285                
1286       if (!(len = skb->len))
1287       {  
1288           printk("rx_dle_intr: skb len 0\n");  
1289           dev_kfree_skb_any(skb);  
1290       }  
1291       else  
1292       {  
1293           struct cpcs_trailer *trailer;
1294           u_short length;
1295           struct ia_vcc *ia_vcc;
1296
1297           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1298                 len, PCI_DMA_FROMDEVICE);
1299           /* no VCC related housekeeping done as yet. lets see */  
1300           vcc = ATM_SKB(skb)->vcc;
1301           if (!vcc) {
1302               printk("IA: null vcc\n");  
1303               dev_kfree_skb_any(skb);
1304               goto INCR_DLE;
1305           }
1306           ia_vcc = INPH_IA_VCC(vcc);
1307           if (ia_vcc == NULL)
1308           {
1309              atomic_inc(&vcc->stats->rx_err);
1310              dev_kfree_skb_any(skb);
1311              atm_return(vcc, atm_guess_pdu2truesize(len));
1312              goto INCR_DLE;
1313            }
1314           // get real pkt length  pwang_test
1315           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1316                                  skb->len - sizeof(*trailer));
1317           length =  swap(trailer->length);
1318           if ((length > iadev->rx_buf_sz) || (length > 
1319                               (skb->len - sizeof(struct cpcs_trailer))))
1320           {
1321              atomic_inc(&vcc->stats->rx_err);
1322              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1323                                                             length, skb->len);)
1324              dev_kfree_skb_any(skb);
1325              atm_return(vcc, atm_guess_pdu2truesize(len));
1326              goto INCR_DLE;
1327           }
1328           skb_trim(skb, length);
1329           
1330           /* Display the packet */  
1331           IF_RXPKT(printk("\nDMAed received data: len = %d \n", skb->len);  
1332           xdump(skb->data, skb->len, "RX: ");
1333           printk("\n");)
1334
1335           IF_RX(printk("rx_dle_intr: skb push");)  
1336           vcc->push(vcc,skb);  
1337           atomic_inc(&vcc->stats->rx);
1338           iadev->rx_pkt_cnt++;
1339       }  
1340 INCR_DLE:
1341       if (++dle == iadev->rx_dle_q.end)  
1342           dle = iadev->rx_dle_q.start;  
1343   }  
1344   iadev->rx_dle_q.read = dle;  
1345   
1346   /* if the interrupts are masked because there were no free desc available,  
1347                 unmask them now. */ 
1348   if (!iadev->rxing) {
1349      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1350      if (!(state & FREEQ_EMPTY)) {
1351         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1352         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1353                                       iadev->reass_reg+REASS_MASK_REG);
1354         iadev->rxing++; 
1355      }
1356   }
1357 }  
1358   
1359   
1360 static int open_rx(struct atm_vcc *vcc)  
1361 {  
1362         IADEV *iadev;  
1363         u_short __iomem *vc_table;  
1364         u_short __iomem *reass_ptr;  
1365         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1366
1367         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1368         iadev = INPH_IA_DEV(vcc->dev);  
1369         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1370            if (iadev->phy_type & FE_25MBIT_PHY) {
1371                printk("IA:  ABR not supported\n");
1372                return -EINVAL; 
1373            }
1374         }
1375         /* Make only this VCI in the vc table valid and let all   
1376                 others be invalid entries */  
1377         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1378         vc_table += vcc->vci;
1379         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1380
1381         *vc_table = vcc->vci << 6;
1382         /* Also keep a list of open rx vcs so that we can attach them with  
1383                 incoming PDUs later. */  
1384         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1385                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1386         {  
1387                 srv_cls_param_t srv_p;
1388                 init_abr_vc(iadev, &srv_p);
1389                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1390         } 
1391         else {  /* for UBR  later may need to add CBR logic */
1392                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1393                 reass_ptr += vcc->vci;
1394                 *reass_ptr = NO_AAL5_PKT;
1395         }
1396         
1397         if (iadev->rx_open[vcc->vci])  
1398                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1399                         vcc->dev->number, vcc->vci);  
1400         iadev->rx_open[vcc->vci] = vcc;  
1401         return 0;  
1402 }  
1403   
1404 static int rx_init(struct atm_dev *dev)  
1405 {  
1406         IADEV *iadev;  
1407         struct rx_buf_desc __iomem *buf_desc_ptr;  
1408         unsigned long rx_pkt_start = 0;  
1409         void *dle_addr;  
1410         struct abr_vc_table  *abr_vc_table; 
1411         u16 *vc_table;  
1412         u16 *reass_table;  
1413         u16 *ptr16;
1414         int i,j, vcsize_sel;  
1415         u_short freeq_st_adr;  
1416         u_short *freeq_start;  
1417   
1418         iadev = INPH_IA_DEV(dev);  
1419   //    spin_lock_init(&iadev->rx_lock); 
1420   
1421         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1422         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1423                                         &iadev->rx_dle_dma);  
1424         if (!dle_addr)  {  
1425                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1426                 goto err_out;
1427         }
1428         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1429         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1430         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1431         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1432         /* the end of the dle q points to the entry after the last  
1433         DLE that can be used. */  
1434   
1435         /* write the upper 20 bits of the start address to rx list address register */  
1436         writel(iadev->rx_dle_dma & 0xfffff000,
1437                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1438         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1439                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1440                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1441         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1442                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1443                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1444   
1445         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1446         writew(0, iadev->reass_reg+MODE_REG);  
1447         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1448   
1449         /* Receive side control memory map  
1450            -------------------------------  
1451   
1452                 Buffer descr    0x0000 (736 - 23K)  
1453                 VP Table        0x5c00 (256 - 512)  
1454                 Except q        0x5e00 (128 - 512)  
1455                 Free buffer q   0x6000 (1K - 2K)  
1456                 Packet comp q   0x6800 (1K - 2K)  
1457                 Reass Table     0x7000 (1K - 2K)  
1458                 VC Table        0x7800 (1K - 2K)  
1459                 ABR VC Table    0x8000 (1K - 32K)  
1460         */  
1461           
1462         /* Base address for Buffer Descriptor Table */  
1463         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1464         /* Set the buffer size register */  
1465         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1466   
1467         /* Initialize each entry in the Buffer Descriptor Table */  
1468         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1469         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1470         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1471         buf_desc_ptr++;  
1472         rx_pkt_start = iadev->rx_pkt_ram;  
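        /* Descriptor 0 stays zeroed and unused; descriptors 1..num_rx_desc
           below each point at their own rx_buf_sz-sized window of packet RAM. */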
1473         for(i=1; i<=iadev->num_rx_desc; i++)  
1474         {  
1475                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1476                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1477                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1478                 buf_desc_ptr++;           
1479                 rx_pkt_start += iadev->rx_buf_sz;  
1480         }  
1481         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1482         i = FREE_BUF_DESC_Q*iadev->memSize; 
1483         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1484         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1485         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1486                                          iadev->reass_reg+FREEQ_ED_ADR);
1487         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1488         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1489                                         iadev->reass_reg+FREEQ_WR_PTR);    
1490         /* Fill the FREEQ with all the free descriptors. */  
1491         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1492         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1493         for(i=1; i<=iadev->num_rx_desc; i++)  
1494         {  
1495                 *freeq_start = (u_short)i;  
1496                 freeq_start++;  
1497         }  
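        /* The free queue now holds descriptor numbers 1..num_rx_desc, i.e.
           every receive buffer starts out available (index 0 is presumably
           reserved to mean "empty"). */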
1498         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1499         /* Packet Complete Queue */
1500         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1501         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1502         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1503         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1504         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1505
1506         /* Exception Queue */
1507         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1508         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1509         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1510                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1511         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1512         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1513  
1514         /* Load local copy of FREEQ and PCQ ptrs */
1515         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1516         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1517         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1518         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1519         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1520         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1521         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1522         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1523         
1524         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1525               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1526               iadev->rfL.pcq_wr);)                
1527         /* just for check - no VP TBL */  
1528         /* VP Table */  
1529         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1530         /* initialize VP Table for invalid VPIs  
1531                 - I guess we can write all 1s or 0x000f in the entire memory  
1532                   space or something similar.  
1533         */  
1534   
1535         /* This seems to work and looks right to me too !!! */  
1536         i =  REASS_TABLE * iadev->memSize;
1537         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1538         /* initialize every Reassembly table entry to NO_AAL5_PKT */  
1539         reass_table = (u16 *)(iadev->reass_ram+i);  
1540         j = REASS_TABLE_SZ * iadev->memSize;
1541         for(i=0; i < j; i++)  
1542                 *reass_table++ = NO_AAL5_PKT;  
1543        i = 8*1024;
1544        vcsize_sel =  0;
1545        while (i != iadev->num_vc) {
1546           i /= 2;
1547           vcsize_sel++;
1548        }
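       /* vcsize_sel ends up as log2(8192 / num_vc): 3 for a 1K-VC board,
          1 for a 4K-VC board; it is folded into the low bits of
          VC_LKUP_BASE below. */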
1549        i = RX_VC_TABLE * iadev->memSize;
1550        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1551        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1552         j = RX_VC_TABLE_SZ * iadev->memSize;
1553         for(i = 0; i < j; i++)  
1554         {  
1555                 /* shift the reassembly pointer by 3 + the lower 3 bits of   
1556                 the vc_lkup_base register (= 3 for 1K VCs); the last byte   
1557                 of each entry holds those low bits.   
1558                 To be programmed properly later.  
1559                 */  
1560                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1561                 vc_table++;  
1562         }  
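        /* Every entry is now flagged invalid (0xf in the low nibble);
           open_rx() rewrites the entry when the VC is actually opened. */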
1563         /* ABR VC table */
1564         i =  ABR_VC_TABLE * iadev->memSize;
1565         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1566                    
1567         i = ABR_VC_TABLE * iadev->memSize;
1568         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1569         j = REASS_TABLE_SZ * iadev->memSize;
1570         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1571         for(i = 0; i < j; i++) {                
1572                 abr_vc_table->rdf = 0x0003;
1573                 abr_vc_table->air = 0x5eb1;
1574                 abr_vc_table++;         
1575         }  
1576
1577         /* Initialize other registers */  
1578   
1579         /* VP Filter Register set for VC Reassembly only */  
1580         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1581         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1582         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1583
1584         /* Packet Timeout Count  related Registers : 
1585            Set packet timeout to occur in about 3 seconds
1586            Set Packet Aging Interval count register to overflow in about 4 us
1587         */  
1588         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1589         ptr16 = (u16*)j;
1590         i = ((u32)ptr16 >> 6) & 0xff;
1591         ptr16  += j - 1;
1592         i |=(((u32)ptr16 << 2) & 0xff00);
1593         writew(i, iadev->reass_reg+TMOUT_RANGE);
1594         /* initialize the desc_tbl */
1595         for(i=0; i<iadev->num_tx_desc;i++)
1596             iadev->desc_tbl[i].timestamp = 0;
1597
1598         /* to clear the interrupt status register - read it */  
1599         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1600   
1601         /* Mask Register - clear it */  
1602         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1603   
1604         skb_queue_head_init(&iadev->rx_dma_q);  
1605         iadev->rx_free_desc_qhead = NULL;   
1606
1607         iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1608         if (!iadev->rx_open) {
1609                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1610                 dev->number);  
1611                 goto err_free_dle;
1612         }  
1613
1614         iadev->rxing = 1;
1615         iadev->rx_pkt_cnt = 0;
1616         /* Mode Register */  
1617         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1618         return 0;  
1619
1620 err_free_dle:
1621         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1622                             iadev->rx_dle_dma);  
1623 err_out:
1624         return -ENOMEM;
1625 }  
1626   
1627
1628 /*  
1629         The memory map suggested in appendix A and the coding for it.   
1630         Keeping it around just in case we change our mind later.  
1631   
1632                 Buffer descr    0x0000 (128 - 4K)  
1633                 UBR sched       0x1000 (1K - 4K)  
1634                 UBR Wait q      0x2000 (1K - 4K)  
1635                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1636                                         (128 - 256) each  
1637                 extended VC     0x4000 (1K - 8K)  
1638                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1639                 CBR sched       0x7000 (as needed)  
1640                 VC table        0x8000 (1K - 32K)  
1641 */  
1642   
1643 static void tx_intr(struct atm_dev *dev)  
1644 {  
1645         IADEV *iadev;  
1646         unsigned short status;  
1647         unsigned long flags;
1648
1649         iadev = INPH_IA_DEV(dev);  
1650   
1651         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1652         if (status & TRANSMIT_DONE){
1653
1654            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1655            spin_lock_irqsave(&iadev->tx_lock, flags);
1656            ia_tx_poll(iadev);
1657            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1658            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1659            if (iadev->close_pending)  
1660                wake_up(&iadev->close_wait);
1661         }         
1662         if (status & TCQ_NOT_EMPTY)  
1663         {  
1664             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1665         }  
1666 }  
1667   
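/*  
        tx_dle_intr: reclaim transmit DLEs the SAR has consumed.  Walk from
        the cached read pointer up to the hardware's current list pointer,
        unmap the data buffer of each two-DLE (payload + CPCS trailer) pair,
        and either complete the skb or, for rate-limited VCs, park it on the
        per-VC txing_skb list for flow control.
*/  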
1668 static void tx_dle_intr(struct atm_dev *dev)
1669 {
1670         IADEV *iadev;
1671         struct dle *dle, *cur_dle; 
1672         struct sk_buff *skb;
1673         struct atm_vcc *vcc;
1674         struct ia_vcc  *iavcc;
1675         u_int dle_lp;
1676         unsigned long flags;
1677
1678         iadev = INPH_IA_DEV(dev);
1679         spin_lock_irqsave(&iadev->tx_lock, flags);   
1680         dle = iadev->tx_dle_q.read;
1681         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1682                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1683         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1684         while (dle != cur_dle)
1685         {
1686             /* free the DMAed skb */ 
1687             skb = skb_dequeue(&iadev->tx_dma_q); 
1688             if (!skb) break;
1689
1690             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1691             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1692                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1693                                  PCI_DMA_TODEVICE);
1694             }
1695             vcc = ATM_SKB(skb)->vcc;
1696             if (!vcc) {
1697                   printk("tx_dle_intr: vcc is null\n");
1698                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1699                   dev_kfree_skb_any(skb);
1700
1701                   return;
1702             }
1703             iavcc = INPH_IA_VCC(vcc);
1704             if (!iavcc) {
1705                   printk("tx_dle_intr: iavcc is null\n");
1706                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1707                   dev_kfree_skb_any(skb);
1708                   return;
1709             }
1710             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1711                if ((vcc->pop) && (skb->len != 0))
1712                {     
1713                  vcc->pop(vcc, skb);
1714                } 
1715                else {
1716                  dev_kfree_skb_any(skb);
1717                }
1718             }
1719             else { /* Hold the rate-limited skb for flow control */
1720                IA_SKB_STATE(skb) |= IA_DLED;
1721                skb_queue_tail(&iavcc->txing_skb, skb);
1722             }
1723             IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%x \n", (u32)skb);)
1724             if (++dle == iadev->tx_dle_q.end)
1725                  dle = iadev->tx_dle_q.start;
1726         }
1727         iadev->tx_dle_q.read = dle;
1728         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1729 }
1730   
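/*  
        open_tx: per-VC transmit setup.  Clamps the requested PCR to the line
        rate, derives a close timeout from the PCR, bounds the socket send
        buffer for rate-limited VCs, fills the main and extended VC table
        entries (the ATM header is split across atm_hdr1/atm_hdr2), and then
        programs the UBR, ABR or CBR specific state.
*/  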
1731 static int open_tx(struct atm_vcc *vcc)  
1732 {  
1733         struct ia_vcc *ia_vcc;  
1734         IADEV *iadev;  
1735         struct main_vc *vc;  
1736         struct ext_vc *evc;  
1737         int ret;
1738         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1739         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1740         iadev = INPH_IA_DEV(vcc->dev);  
1741         
1742         if (iadev->phy_type & FE_25MBIT_PHY) {
1743            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1744                printk("IA:  ABR not supported\n");
1745                return -EINVAL; 
1746            }
1747           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1748                printk("IA:  CBR not supported\n");
1749                return -EINVAL; 
1750           }
1751         }
1752         ia_vcc =  INPH_IA_VCC(vcc);
1753         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1754         if (vcc->qos.txtp.max_sdu > 
1755                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1756            printk("IA:  SDU size (%d) exceeds the configured SDU size %d\n",
1757                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1758            vcc->dev_data = NULL;
1759            kfree(ia_vcc);
1760            return -EINVAL; 
1761         }
1762         ia_vcc->vc_desc_cnt = 0;
1763         ia_vcc->txing = 1;
1764
1765         /* find pcr */
1766         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1767            vcc->qos.txtp.pcr = iadev->LineRate;
1768         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1769            vcc->qos.txtp.pcr = iadev->LineRate;
1770         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1771            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1772         if (vcc->qos.txtp.pcr > iadev->LineRate)
1773              vcc->qos.txtp.pcr = iadev->LineRate;
1774         ia_vcc->pcr = vcc->qos.txtp.pcr;
1775
1776         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1777         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1778         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1779         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1780         if (ia_vcc->pcr < iadev->rate_limit)
1781            skb_queue_head_init (&ia_vcc->txing_skb);
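           /* For rate-limited VCs, cap the socket send buffer at a few
              max-SDUs (or 24 KB when no max SDU is given) so the backlog
              held for flow control stays bounded; the thresholds below are
              the driver's own heuristics. */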
1782         if (ia_vcc->pcr < iadev->rate_limit) {
1783            struct sock *sk = sk_atm(vcc);
1784
1785            if (vcc->qos.txtp.max_sdu != 0) {
1786                if (ia_vcc->pcr > 60000)
1787                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1788                else if (ia_vcc->pcr > 2000)
1789                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1790                else
1791                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1792            }
1793            else
1794              sk->sk_sndbuf = 24576;
1795         }
1796            
1797         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1798         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1799         vc += vcc->vci;  
1800         evc += vcc->vci;  
1801         memset((caddr_t)vc, 0, sizeof(*vc));  
1802         memset((caddr_t)evc, 0, sizeof(*evc));  
1803           
1804         /* store the most significant 4 bits of vci as the last 4 bits   
1805                 of first part of atm header.  
1806            store the last 12 bits of vci as first 12 bits of the second  
1807                 part of the atm header.  
1808         */  
1809         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1810         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
1811  
1812         /* check the following for different traffic classes */  
1813         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1814         {  
1815                 vc->type = UBR;  
1816                 vc->status = CRC_APPEND;
1817                 vc->acr = cellrate_to_float(iadev->LineRate);  
1818                 if (vcc->qos.txtp.pcr > 0) 
1819                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1820                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1821                                              vcc->qos.txtp.max_pcr,vc->acr);)
1822         }  
1823         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1824         {       srv_cls_param_t srv_p;
1825                 IF_ABR(printk("Tx ABR VCC\n");)  
1826                 init_abr_vc(iadev, &srv_p);
1827                 if (vcc->qos.txtp.pcr > 0) 
1828                    srv_p.pcr = vcc->qos.txtp.pcr;
1829                 if (vcc->qos.txtp.min_pcr > 0) {
1830                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1831                    if (tmpsum > iadev->LineRate)
1832                        return -EBUSY;
1833                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1834                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1835                 } 
1836                 else srv_p.mcr = 0;
1837                 if (vcc->qos.txtp.icr)
1838                    srv_p.icr = vcc->qos.txtp.icr;
1839                 if (vcc->qos.txtp.tbe)
1840                    srv_p.tbe = vcc->qos.txtp.tbe;
1841                 if (vcc->qos.txtp.frtt)
1842                    srv_p.frtt = vcc->qos.txtp.frtt;
1843                 if (vcc->qos.txtp.rif)
1844                    srv_p.rif = vcc->qos.txtp.rif;
1845                 if (vcc->qos.txtp.rdf)
1846                    srv_p.rdf = vcc->qos.txtp.rdf;
1847                 if (vcc->qos.txtp.nrm_pres)
1848                    srv_p.nrm = vcc->qos.txtp.nrm;
1849                 if (vcc->qos.txtp.trm_pres)
1850                    srv_p.trm = vcc->qos.txtp.trm;
1851                 if (vcc->qos.txtp.adtf_pres)
1852                    srv_p.adtf = vcc->qos.txtp.adtf;
1853                 if (vcc->qos.txtp.cdf_pres)
1854                    srv_p.cdf = vcc->qos.txtp.cdf;    
1855                 if (srv_p.icr > srv_p.pcr)
1856                    srv_p.icr = srv_p.pcr;    
1857                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1858                                                       srv_p.pcr, srv_p.mcr);)
1859                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1860         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1861                 if (iadev->phy_type & FE_25MBIT_PHY) {
1862                     printk("IA:  CBR not supported\n");
1863                     return -EINVAL; 
1864                 }
1865                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1866                    IF_CBR(printk("PCR is not available\n");)
1867                    return -1;
1868                 }
1869                 vc->type = CBR;
1870                 vc->status = CRC_APPEND;
1871                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1872                     return ret;
1873                 }
1874        } 
1875         else  
1876            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1877         
1878         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1879         IF_EVENT(printk("ia open_tx returning \n");)  
1880         return 0;  
1881 }  
1882   
1883   
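/*  
        tx_init: bring up the transmit side.  Allocates the TX DLE ring and
        the per-descriptor CPCS trailer buffers, initializes the buffer
        descriptor table, the transmit-complete and packet-ready queues, the
        CBR/UBR/ABR schedule tables and the per-VC testTable, then programs
        the segmentation registers and unmasks the transmit interrupts.
*/  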
1884 static int tx_init(struct atm_dev *dev)  
1885 {  
1886         IADEV *iadev;  
1887         struct tx_buf_desc *buf_desc_ptr;
1888         unsigned int tx_pkt_start;  
1889         void *dle_addr;  
1890         int i;  
1891         u_short tcq_st_adr;  
1892         u_short *tcq_start;  
1893         u_short prq_st_adr;  
1894         u_short *prq_start;  
1895         struct main_vc *vc;  
1896         struct ext_vc *evc;   
1897         u_short tmp16;
1898         u32 vcsize_sel;
1899  
1900         iadev = INPH_IA_DEV(dev);  
1901         spin_lock_init(&iadev->tx_lock);
1902  
1903         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1904                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1905
1906         /* Allocate 4k (boundary aligned) bytes */
1907         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1908                                         &iadev->tx_dle_dma);  
1909         if (!dle_addr)  {
1910                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1911                 goto err_out;
1912         }
1913         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1914         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1915         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1916         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1917
1918         /* write the upper 20 bits of the start address to tx list address register */  
1919         writel(iadev->tx_dle_dma & 0xfffff000,
1920                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1921         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1922         writew(0, iadev->seg_reg+MODE_REG_0);  
1923         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1924         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1925         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1926         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1927   
1928         /*  
1929            Transmit side control memory map  
1930            --------------------------------    
1931          Buffer descr   0x0000 (128 - 4K)  
1932          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1933                                         (512 - 1K) each  
1934                                         TCQ - 4K, PRQ - 5K  
1935          CBR Table      0x1800 (as needed) - 6K  
1936          UBR Table      0x3000 (1K - 4K) - 12K  
1937          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1938          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1939                                 ABR Tbl - 20K, ABR Wq - 22K   
1940          extended VC    0x6000 (1K - 8K) - 24K  
1941          VC Table       0x8000 (1K - 32K) - 32K  
1942           
1943         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1944         and Wait q, which can be allotted later.  
1945         */  
1946      
1947         /* Buffer Descriptor Table Base address */  
1948         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1949   
1950         /* initialize each entry in the buffer descriptor table */  
1951         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1952         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1953         buf_desc_ptr++;  
1954         tx_pkt_start = TX_PACKET_RAM;  
1955         for(i=1; i<=iadev->num_tx_desc; i++)  
1956         {  
1957                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1958                 buf_desc_ptr->desc_mode = AAL5;  
1959                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1960                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1961                 buf_desc_ptr++;           
1962                 tx_pkt_start += iadev->tx_buf_sz;  
1963         }  
1964         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1965         if (!iadev->tx_buf) {
1966             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1967             goto err_free_dle;
1968         }
1969         for (i= 0; i< iadev->num_tx_desc; i++)
1970         {
1971             struct cpcs_trailer *cpcs;
1972  
1973             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1974             if(!cpcs) {                
1975                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1976                 goto err_free_tx_bufs;
1977             }
1978             iadev->tx_buf[i].cpcs = cpcs;
1979             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1980                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1981         }
1982         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1983                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1984         if (!iadev->desc_tbl) {
1985                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1986                 goto err_free_all_tx_bufs;
1987         }
1988   
1989         /* Communication Queues base address */  
1990         i = TX_COMP_Q * iadev->memSize;
1991         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1992   
1993         /* Transmit Complete Queue */  
1994         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1995         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1996         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1997         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1998         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1999                                               iadev->seg_reg+TCQ_ED_ADR); 
2000         /* Fill the TCQ with all the free descriptors. */  
2001         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2002         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2003         for(i=1; i<=iadev->num_tx_desc; i++)  
2004         {  
2005                 *tcq_start = (u_short)i;  
2006                 tcq_start++;  
2007         }  
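        /* The TCQ now holds descriptor numbers 1..num_tx_desc, so every
           transmit descriptor starts out free from the driver's point of
           view (0 presumably meaning "no descriptor"). */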
2008   
2009         /* Packet Ready Queue */  
2010         i = PKT_RDY_Q * iadev->memSize; 
2011         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2012         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2013                                               iadev->seg_reg+PRQ_ED_ADR);
2014         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2015         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2016          
2017         /* Load local copy of PRQ and TCQ ptrs */
2018         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2019         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2020         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2021
2022         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2023         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2024         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2025
2026         /* Just for safety, zero every PRQ entry so the packet-ready  
2027            queue starts out empty. */  
2028         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2029         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2030         for(i=1; i<=iadev->num_tx_desc; i++)  
2031         {  
2032                 *prq_start = (u_short)0;        /* clear each entry */  
2033                 prq_start++;  
2034         }  
2035         /* CBR Table */  
2036         IF_INIT(printk("Start CBR Init\n");)
2037 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2038         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2039 #else /* Charlie's logic is wrong ? */
2040         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2041         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2042         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2043 #endif
2044
2045         IF_INIT(printk("value in register = 0x%x\n",
2046                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2047         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2048         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2049         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2050                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2051         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2052         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2053         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2054         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2055                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2056         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2057           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2058           readw(iadev->seg_reg+CBR_TAB_END+1));)
2059
2060         /* Initialize the CBR Scheduling Table */
2061         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2062                                                           0, iadev->num_vc*6); 
2063         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2064         iadev->CbrEntryPt = 0;
2065         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2066         iadev->NumEnabledCBR = 0;
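        /* Each VC gets three CBR schedule slots (CbrTotEntries = num_vc * 3);
           Granularity is the cell rate one slot represents.  Assuming
           MAX_ATM_155 is the OC-3 cell rate of ~353,207 cells/s, that works
           out to roughly 115 cells/s per slot on a 1K-VC board. */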
2067
2068         /* UBR scheduling Table and wait queue */  
2069         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2070                 - SCHEDSZ is 1K (# of entries).  
2071                 - UBR Table size is 4K  
2072                 - UBR wait queue is 4K  
2073            since the table and wait queues are contiguous, all the bytes   
2074            can be initialized by one memset.  
2075         */  
2076         
2077         vcsize_sel = 0;
2078         i = 8*1024;
2079         while (i != iadev->num_vc) {
2080           i /= 2;
2081           vcsize_sel++;
2082         }
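        /* As in rx_init(): vcsize_sel = log2(8192 / num_vc), i.e. 3 for a
           1K-VC board and 1 for a 4K-VC board; it goes into the low bits of
           VCT_BASE below. */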
2083  
2084         i = MAIN_VC_TABLE * iadev->memSize;
2085         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2086         i =  EXT_VC_TABLE * iadev->memSize;
2087         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2088         i = UBR_SCHED_TABLE * iadev->memSize;
2089         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2090         i = UBR_WAIT_Q * iadev->memSize; 
2091         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2092         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2093                                                        0, iadev->num_vc*8);
2094         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2095         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2096                 - SCHEDSZ is 1K (# of entries).  
2097                 - ABR Table size is 2K  
2098                 - ABR wait queue is 2K  
2099            since the table and wait queues are contiguous, all the bytes   
2100            can be initialized by one memset.  
2101         */  
2102         i = ABR_SCHED_TABLE * iadev->memSize;
2103         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2104         i = ABR_WAIT_Q * iadev->memSize;
2105         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2106  
2107         i = ABR_SCHED_TABLE*iadev->memSize;
2108         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2109         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2110         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2111         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2112         if (!iadev->testTable) {
2113            printk("Get free page failed\n");
2114            goto err_free_desc_tbl;
2115         }
2116         for(i=0; i<iadev->num_vc; i++)  
2117         {  
2118                 memset((caddr_t)vc, 0, sizeof(*vc));  
2119                 memset((caddr_t)evc, 0, sizeof(*evc));  
2120                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2121                                                 GFP_KERNEL);
2122                 if (!iadev->testTable[i])
2123                         goto err_free_test_tables;
2124                 iadev->testTable[i]->lastTime = 0;
2125                 iadev->testTable[i]->fract = 0;
2126                 iadev->testTable[i]->vc_status = VC_UBR;
2127                 vc++;  
2128                 evc++;  
2129         }  
2130   
2131         /* Other Initialization */  
2132           
2133         /* Max Rate Register */  
2134         if (iadev->phy_type & FE_25MBIT_PHY) {
2135            writew(RATE25, iadev->seg_reg+MAXRATE);  
2136            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2137         }
2138         else {
2139            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2140            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2141         }
2142         /* Set Idle Header Registers to be sure */  
2143         writew(0, iadev->seg_reg+IDLEHEADHI);  
2144         writew(0, iadev->seg_reg+IDLEHEADLO);  
2145   
2146         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2147         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2148
2149         iadev->close_pending = 0;
2150         init_waitqueue_head(&iadev->close_wait);
2151         init_waitqueue_head(&iadev->timeout_wait);
2152         skb_queue_head_init(&iadev->tx_dma_q);  
2153         ia_init_rtn_q(&iadev->tx_return_q);  
2154
2155         /* RM Cell Protocol ID and Message Type */  
2156         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2157         skb_queue_head_init (&iadev->tx_backlog);
2158   
2159         /* Mode Register 1 */  
2160         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2161   
2162         /* Mode Register 0 */  
2163         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2164   
2165         /* Interrupt Status Register - read to clear */  
2166         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2167   
2168         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2169         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2170         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2171         iadev->tx_pkt_cnt = 0;
2172         iadev->rate_limit = iadev->LineRate / 3;
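        /* VCs whose PCR falls below this limit (one third of the line rate)
           take the flow-controlled path: their skbs are held on txing_skb
           until transmit-complete processing frees them. */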
2173   
2174         return 0;
2175
2176 err_free_test_tables:
2177         while (--i >= 0)
2178                 kfree(iadev->testTable[i]);
2179         kfree(iadev->testTable);
2180 err_free_desc_tbl:
2181         kfree(iadev->desc_tbl);
2182 err_free_all_tx_bufs:
2183         i = iadev->num_tx_desc;
2184 err_free_tx_bufs:
2185         while (--i >= 0) {
2186                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2187
2188                 pci_unmap_single(iadev->pci, desc->dma_addr,
2189                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2190                 kfree(desc->cpcs);
2191         }
2192         kfree(iadev->tx_buf);
2193 err_free_dle:
2194         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2195                             iadev->tx_dle_dma);  
2196 err_out:
2197         return -ENOMEM;
2198 }   
2199    
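/*  
        ia_int: top-level interrupt handler.  Loops while the bus status
        register reports pending sources and dispatches reassembly,
        segmentation, RX/TX DLE and front-end interrupts; the DLE status
        bits are cleared by writing 1 back to the status register.
*/  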
2200 static irqreturn_t ia_int(int irq, void *dev_id)  
2201 {  
2202    struct atm_dev *dev;  
2203    IADEV *iadev;  
2204    unsigned int status;  
2205    int handled = 0;
2206
2207    dev = dev_id;  
2208    iadev = INPH_IA_DEV(dev);  
2209    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2210    { 
2211         handled = 1;
2212         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2213         if (status & STAT_REASSINT)  
2214         {  
2215            /* do something */  
2216            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2217            rx_intr(dev);  
2218         }  
2219         if (status & STAT_DLERINT)  
2220         {  
2221            /* Clear this bit by writing a 1 to it. */  
2222            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2223            rx_dle_intr(dev);  
2224         }  
2225         if (status & STAT_SEGINT)  
2226         {  
2227            /* do something */ 
2228            IF_EVENT(printk("IA: tx_intr \n");) 
2229            tx_intr(dev);  
2230         }  
2231         if (status & STAT_DLETINT)  
2232         {  
2233            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2234            tx_dle_intr(dev);  
2235         }  
2236         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2237         {  
2238            if (status & STAT_FEINT) 
2239                IaFrontEndIntr(iadev);
2240         }  
2241    }
2242    return IRQ_RETVAL(handled);
2243 }  
2244           
2245           
2246           
2247 /*----------------------------- entries --------------------------------*/  
2248 static int get_esi(struct atm_dev *dev)  
2249 {  
2250         IADEV *iadev;  
2251         int i;  
2252         u32 mac1;  
2253         u16 mac2;  
2254           
2255         iadev = INPH_IA_DEV(dev);  
2256         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2257                                 iadev->reg+IPHASE5575_MAC1)));  
2258         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2259         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2260         for (i=0; i<MAC1_LEN; i++)  
2261                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2262           
2263         for (i=0; i<MAC2_LEN; i++)  
2264                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2265         return 0;  
2266 }  
2267           
2268 static int reset_sar(struct atm_dev *dev)  
2269 {  
2270         IADEV *iadev;  
2271         int i, error = 1;  
2272         unsigned int pci[64];  
2273           
2274         iadev = INPH_IA_DEV(dev);  
2275         for(i=0; i<64; i++)  
2276           if ((error = pci_read_config_dword(iadev->pci,  
2277                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2278               return error;  
2279         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2280         for(i=0; i<64; i++)  
2281           if ((error = pci_write_config_dword(iadev->pci,  
2282                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2283             return error;  
2284         udelay(5);  
2285         return 0;  
2286 }  
2287           
2288           
2289 static int __devinit ia_init(struct atm_dev *dev)
2290 {  
2291         IADEV *iadev;  
2292         unsigned long real_base;
2293         void __iomem *base;
2294         unsigned short command;  
2295         int error, i; 
2296           
2297         /* The device has been identified and registered. Now we read   
2298            necessary configuration info like memory base address,   
2299            interrupt number etc */  
2300           
2301         IF_INIT(printk(">ia_init\n");)  
2302         dev->ci_range.vpi_bits = 0;  
2303         dev->ci_range.vci_bits = NR_VCI_LD;  
2304
2305         iadev = INPH_IA_DEV(dev);  
2306         real_base = pci_resource_start (iadev->pci, 0);
2307         iadev->irq = iadev->pci->irq;
2308                   
2309         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2310         if (error) {
2311                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2312                                 dev->number,error);  
2313                 return -EINVAL;  
2314         }  
2315         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2316                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2317           
2318         /* find mapping size of board */  
2319           
2320         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2321
2322         if (iadev->pci_map_size == 0x100000){
2323           iadev->num_vc = 4096;
2324           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2325           iadev->memSize = 4;
2326         }
2327         else if (iadev->pci_map_size == 0x40000) {
2328           iadev->num_vc = 1024;
2329           iadev->memSize = 1;
2330         }
2331         else {
2332            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2333            return -EINVAL;
2334         }
2335         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2336           
2337         /* enable bus mastering */
2338         pci_set_master(iadev->pci);
2339
2340         /*  
2341          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2342          */  
2343         udelay(10);  
2344           
2345         /* mapping the physical address to a virtual address in address space */  
2346         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2347           
2348         if (!base)  
2349         {  
2350                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2351                             dev->number);  
2352                 return error;  
2353         }  
2354         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2355                         dev->number, iadev->pci->revision, base, iadev->irq);)
2356           
2357         /* filling the iphase dev structure */  
2358         iadev->mem = iadev->pci_map_size /2;  
2359         iadev->real_base = real_base;  
2360         iadev->base = base;  
2361                   
2362         /* Bus Interface Control Registers */  
2363         iadev->reg = base + REG_BASE;
2364         /* Segmentation Control Registers */  
2365         iadev->seg_reg = base + SEG_BASE;
2366         /* Reassembly Control Registers */  
2367         iadev->reass_reg = base + REASS_BASE;  
2368         /* Front end/ DMA control registers */  
2369         iadev->phy = base + PHY_BASE;  
2370         iadev->dma = base + PHY_BASE;  
2371         /* RAM - Segmentation RAm and Reassembly RAM */  
2372         iadev->ram = base + ACTUAL_RAM_BASE;  
2373         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2374         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2375   
2376         /* lets print out the above */  
2377         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2378           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2379           iadev->phy, iadev->ram, iadev->seg_ram, 
2380           iadev->reass_ram);) 
2381           
2382         /* lets try reading the MAC address */  
2383         error = get_esi(dev);  
2384         if (error) {
2385           iounmap(iadev->base);
2386           return error;  
2387         }
2388         printk("IA: ");
2389         for (i=0; i < ESI_LEN; i++)  
2390                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2391         printk("\n");  
2392   
2393         /* reset SAR */  
2394         if (reset_sar(dev)) {
2395            iounmap(iadev->base);
2396            printk("IA: reset SAR failed, please try again\n");
2397            return 1;
2398         }
2399         return 0;  
2400 }  
2401
2402 static void ia_update_stats(IADEV *iadev) {
2403     if (!iadev->carrier_detect)
2404         return;
2405     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2406     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2407     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2408     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2409     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2410     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2411     return;
2412 }
2413   
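/*  
        ia_led_timer: runs every HZ/4 and toggles the front-panel LED bit in
        the bus control register.  On one phase it folds the hardware cell
        and drop counters into the software stats; on the other it polls the
        transmit path and wakes anyone waiting in close.
*/  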
2414 static void ia_led_timer(unsigned long arg) {
2415         unsigned long flags;
2416         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2417         u_char i;
2418         static u32 ctrl_reg; 
2419         for (i = 0; i < iadev_count; i++) {
2420            if (ia_dev[i]) {
2421               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2422               if (blinking[i] == 0) {
2423                  blinking[i]++;
2424                  ctrl_reg &= (~CTRL_LED);
2425                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2426                  ia_update_stats(ia_dev[i]);
2427               }
2428               else {
2429                  blinking[i] = 0;
2430                  ctrl_reg |= CTRL_LED;
2431                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2432                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2433                  if (ia_dev[i]->close_pending)  
2434                     wake_up(&ia_dev[i]->close_wait);
2435                  ia_tx_poll(ia_dev[i]);
2436                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2437               }
2438            }
2439         }
2440         mod_timer(&ia_timer, jiffies + HZ / 4);
2441         return;
2442 }
2443
2444 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2445         unsigned long addr)  
2446 {  
2447         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2448 }  
2449   
2450 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2451 {  
2452         return readl(INPH_IA_DEV(dev)->phy+addr);  
2453 }  
2454
2455 static void ia_free_tx(IADEV *iadev)
2456 {
2457         int i;
2458
2459         kfree(iadev->desc_tbl);
2460         for (i = 0; i < iadev->num_vc; i++)
2461                 kfree(iadev->testTable[i]);
2462         kfree(iadev->testTable);
2463         for (i = 0; i < iadev->num_tx_desc; i++) {
2464                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2465
2466                 pci_unmap_single(iadev->pci, desc->dma_addr,
2467                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2468                 kfree(desc->cpcs);
2469         }
2470         kfree(iadev->tx_buf);
2471         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2472                             iadev->tx_dle_dma);  
2473 }
2474
2475 static void ia_free_rx(IADEV *iadev)
2476 {
2477         kfree(iadev->rx_open);
2478         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2479                           iadev->rx_dle_dma);  
2480 }
2481
2482 static int __devinit ia_start(struct atm_dev *dev)
2483 {  
2484         IADEV *iadev;  
2485         int error;  
2486         unsigned char phy;  
2487         u32 ctrl_reg;  
2488         IF_EVENT(printk(">ia_start\n");)  
2489         iadev = INPH_IA_DEV(dev);  
2490         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2491                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2492                     dev->number, iadev->irq);  
2493                 error = -EAGAIN;
2494                 goto err_out;
2495         }  
2496         /* @@@ should release IRQ on error */  
2497         /* enabling memory + master */  
2498         if ((error = pci_write_config_word(iadev->pci,   
2499                                 PCI_COMMAND,   
2500                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2501         {  
2502                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2503                     "master (0x%x)\n",dev->number, error);  
2504                 error = -EIO;  
2505                 goto err_free_irq;
2506         }  
2507         udelay(10);  
2508   
2509         /* Maybe we should reset the front end, initialize Bus Interface Control   
2510                 Registers and see. */  
2511   
2512         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2513                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2514         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2515         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2516                         | CTRL_B8  
2517                         | CTRL_B16  
2518                         | CTRL_B32  
2519                         | CTRL_B48  
2520                         | CTRL_B64  
2521                         | CTRL_B128  
2522                         | CTRL_ERRMASK  
2523                         | CTRL_DLETMASK         /* should be removed later */  
2524                         | CTRL_DLERMASK  
2525                         | CTRL_SEGMASK  
2526                         | CTRL_REASSMASK          
2527                         | CTRL_FEMASK  
2528                         | CTRL_CSPREEMPT;  
2529   
2530        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2531   
2532         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2533                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2534            printk("Bus status reg after init: %08x\n", 
2535                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2536     
2537         ia_hw_type(iadev); 
2538         error = tx_init(dev);  
2539         if (error)
2540                 goto err_free_irq;
2541         error = rx_init(dev);  
2542         if (error)
2543                 goto err_free_tx;
2544   
2545         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2546         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2547         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2548                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2549         phy = 0; /* resolve compiler complaint */
2550         IF_INIT ( 
2551         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2552                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2553         else  
2554                 printk("IA: utopia,rev.%0x\n",phy);) 
2555
2556         if (iadev->phy_type &  FE_25MBIT_PHY)
2557            ia_mb25_init(iadev);
2558         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2559            ia_suni_pm7345_init(iadev);
2560         else {
2561                 error = suni_init(dev);
2562                 if (error)
2563                         goto err_free_rx;
2564                 /* 
2565                  * Enable interrupt on loss of signal
2566                  * SUNI_RSOP_CIE - 0x10
2567                  * SUNI_RSOP_CIE_LOSE - 0x04
2568                  */
2569                 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2570 #ifndef MODULE
2571                 error = dev->phy->start(dev);
2572                 if (error)
2573                         goto err_free_rx;
2574 #endif
2575                 /* Get iadev->carrier_detect status */
2576                 IaFrontEndIntr(iadev);
2577         }
2578         return 0;
2579
2580 err_free_rx:
2581         ia_free_rx(iadev);
2582 err_free_tx:
2583         ia_free_tx(iadev);
2584 err_free_irq:
2585         free_irq(iadev->irq, dev);  
2586 err_out:
2587         return error;
2588 }  
2589   
2590 static void ia_close(struct atm_vcc *vcc)  
2591 {
2592         DEFINE_WAIT(wait);
2593         u16 *vc_table;
2594         IADEV *iadev;
2595         struct ia_vcc *ia_vcc;
2596         struct sk_buff *skb = NULL;
2597         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2598         unsigned long closetime, flags;
2599
2600         iadev = INPH_IA_DEV(vcc->dev);
2601         ia_vcc = INPH_IA_VCC(vcc);
2602         if (!ia_vcc) return;  
2603
2604         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2605                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2606         clear_bit(ATM_VF_READY,&vcc->flags);
2607         skb_queue_head_init (&tmp_tx_backlog);
2608         skb_queue_head_init (&tmp_vcc_backlog); 
2609         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2610            iadev->close_pending++;
2611            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2612            schedule_timeout(50);
2613            finish_wait(&iadev->timeout_wait, &wait);
2614            spin_lock_irqsave(&iadev->tx_lock, flags); 
2615            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2616               if (ATM_SKB(skb)->vcc == vcc){ 
2617                  if (vcc->pop) vcc->pop(vcc, skb);
2618                  else dev_kfree_skb_any(skb);
2619               }
2620               else 
2621                  skb_queue_tail(&tmp_tx_backlog, skb);
2622            } 
2623            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2624              skb_queue_tail(&iadev->tx_backlog, skb);
2625            IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2626            closetime = 300000 / ia_vcc->pcr;
2627            if (closetime == 0)
2628               closetime = 1;
2629            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2630            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2631            spin_lock_irqsave(&iadev->tx_lock, flags);
2632            iadev->close_pending--;
2633            iadev->testTable[vcc->vci]->lastTime = 0;
2634            iadev->testTable[vcc->vci]->fract = 0; 
2635            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2636            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2637               if (vcc->qos.txtp.min_pcr > 0)
2638                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2639            }
2640            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2641               ia_vcc = INPH_IA_VCC(vcc); 
2642               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2643               ia_cbrVc_close (vcc);
2644            }
2645            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2646         }
2647         
2648         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2649            // reset reass table
2650            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2651            vc_table += vcc->vci; 
2652            *vc_table = NO_AAL5_PKT;
2653            // reset vc table
2654            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2655            vc_table += vcc->vci;
2656            *vc_table = (vcc->vci << 6) | 15;
2657            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2658               struct abr_vc_table __iomem *abr_vc_table = 
2659                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2660               abr_vc_table +=  vcc->vci;
2661               abr_vc_table->rdf = 0x0003;
2662               abr_vc_table->air = 0x5eb1;
2663            }                                 
2664            // Drain the packets
2665            rx_dle_intr(vcc->dev); 
2666            iadev->rx_open[vcc->vci] = NULL;
2667         }
2668         kfree(INPH_IA_VCC(vcc));  
2669         ia_vcc = NULL;
2670         vcc->dev_data = NULL;
2671         clear_bit(ATM_VF_ADDR,&vcc->flags);
2672         return;        
2673 }  
2674   
2675 static int ia_open(struct atm_vcc *vcc)
2676 {  
2677         IADEV *iadev;  
2678         struct ia_vcc *ia_vcc;  
2679         int error;  
2680         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2681         {  
2682                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2683                 vcc->dev_data = NULL;
2684         }  
2685         iadev = INPH_IA_DEV(vcc->dev);  
2686         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2687         {  
2688                 IF_EVENT(printk("iphase open: unspec part\n");)  
2689                 set_bit(ATM_VF_ADDR,&vcc->flags);
2690         }  
2691         if (vcc->qos.aal != ATM_AAL5)  
2692                 return -EINVAL;  
2693         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2694                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2695   
2696         /* Device dependent initialization */  
2697         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2698         if (!ia_vcc) return -ENOMEM;  
2699         vcc->dev_data = ia_vcc;
2700   
2701         if ((error = open_rx(vcc)))  
2702         {  
2703                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2704                 ia_close(vcc);  
2705                 return error;  
2706         }  
2707   
2708         if ((error = open_tx(vcc)))  
2709         {  
2710                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2711                 ia_close(vcc);  
2712                 return error;  
2713         }  
2714   
2715         set_bit(ATM_VF_READY,&vcc->flags);
2716
2717 #if 0
2718         {
2719            static u8 first = 1; 
2720            if (first) {
2721               ia_timer.expires = jiffies + 3*HZ;
2722               add_timer(&ia_timer);
2723               first = 0;
2724            }           
2725         }
2726 #endif
2727         IF_EVENT(printk("ia open returning\n");)  
2728         return 0;  
2729 }  
2730   
2731 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2732 {  
2733         IF_EVENT(printk(">ia_change_qos\n");)  
2734         return 0;  
2735 }  
2736   
2737 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2738 {  
2739    IA_CMDBUF ia_cmds;
2740    IADEV *iadev;
2741    int i, board;
2742    u16 __user *tmps;
2743    IF_EVENT(printk(">ia_ioctl\n");)  
2744    if (cmd != IA_CMD) {
2745       if (!dev->phy->ioctl) return -EINVAL;
2746       return dev->phy->ioctl(dev,cmd,arg);
2747    }
2748    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
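   /* ia_cmds.status doubles as the board index on input; boards are looked
      up in the global ia_dev[] array filled in at probe time. */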
2749    board = ia_cmds.status;
2750    if ((board < 0) || (board >= iadev_count))
2751          board = 0;    
2752    iadev = ia_dev[board];
2753    switch (ia_cmds.cmd) {
2754    case MEMDUMP:
2755    {
2756         switch (ia_cmds.sub_cmd) {
2757           case MEMDUMP_DEV:     
2758              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2759              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2760                 return -EFAULT;
2761              ia_cmds.status = 0;
2762              break;
2763           case MEMDUMP_SEGREG:
2764              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2765              tmps = (u16 __user *)ia_cmds.buf;
2766              for(i=0; i<0x80; i+=2, tmps++)
2767                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2768              ia_cmds.status = 0;
2769              ia_cmds.len = 0x80;
2770              break;
2771           case MEMDUMP_REASSREG:
2772              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2773              tmps = (u16 __user *)ia_cmds.buf;
2774              for(i=0; i<0x80; i+=2, tmps++)
2775                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2776              ia_cmds.status = 0;
2777              ia_cmds.len = 0x80;
2778              break;
2779           case MEMDUMP_FFL:
2780           {  
2781              ia_regs_t       *regs_local;
2782              ffredn_t        *ffL;
2783              rfredn_t        *rfL;
2784                      
2785              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2786              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2787              if (!regs_local) return -ENOMEM;
2788              ffL = &regs_local->ffredn;
2789              rfL = &regs_local->rfredn;
2790              /* Copy real rfred registers into the local copy */
2791              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2792                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2793              /* Copy real ffred registers into the local copy */
2794              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2795                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2796
2797              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2798                 kfree(regs_local);
2799                 return -EFAULT;
2800              }
2801              kfree(regs_local);
2802              printk("Board %d registers dumped\n", board);
2803              ia_cmds.status = 0;                  
2804          }      
2805              break;        
2806          case READ_REG:
2807          {  
2808              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2809              desc_dbg(iadev); 
2810              ia_cmds.status = 0; 
2811          }
2812              break;
2813          case 0x6:
2814          {  
2815              ia_cmds.status = 0; 
2816              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2817              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2818          }
2819              break;
2820          case 0x8:
2821          {
2822              struct k_sonet_stats *stats;
2823              stats = &PRIV(_ia_dev[board])->sonet_stats;
2824              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2825              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2826              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2827              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2828              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2829              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2830              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2831              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2832              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2833          }
2834             ia_cmds.status = 0;
2835             break;
2836          case 0x9:
2837             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2838             for (i = 1; i <= iadev->num_rx_desc; i++)
2839                free_desc(_ia_dev[board], i);
2840             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2841                                             iadev->reass_reg+REASS_MASK_REG);
2842             iadev->rxing = 1;
2843             
2844             ia_cmds.status = 0;
2845             break;
2846
2847          case 0xb:
2848             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2849             IaFrontEndIntr(iadev);
2850             break;
2851          case 0xa:
2852             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2853          {  
2854              ia_cmds.status = 0; 
2855              IADebugFlag = ia_cmds.maddr;
2856              printk("New debug option loaded\n");
2857          }
2858              break;
2859          default:
2860              ia_cmds.status = 0;
2861              break;
2862       } 
2863    }
2864       break;
2865    default:
2866       break;
2867
2868    }    
2869    return 0;  
2870 }  
2871   
2872 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2873         void __user *optval, int optlen)  
2874 {  
2875         IF_EVENT(printk(">ia_getsockopt\n");)  
2876         return -EINVAL;  
2877 }  
2878   
2879 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2880         void __user *optval, int optlen)  
2881 {  
2882         IF_EVENT(printk(">ia_setsockopt\n");)  
2883         return -EINVAL;  
2884 }  
2885   
2886 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2887         IADEV *iadev;
2888         struct dle *wr_ptr;
2889         struct tx_buf_desc __iomem *buf_desc_ptr;
2890         int desc;
2891         int comp_code;
2892         int total_len;
2893         struct cpcs_trailer *trailer;
2894         struct ia_vcc *iavcc;
2895
2896         iadev = INPH_IA_DEV(vcc->dev);  
2897         iavcc = INPH_IA_VCC(vcc);
2898         if (!iavcc->txing) {
2899            printk("discard packet on closed VC\n");
2900            if (vcc->pop)
2901                 vcc->pop(vcc, skb);
2902            else
2903                 dev_kfree_skb_any(skb);
2904            return 0;
2905         }
2906
2907         if (skb->len > iadev->tx_buf_sz - 8) {
2908            printk("Transmit size exceeds tx buffer size\n");
2909            if (vcc->pop)
2910                  vcc->pop(vcc, skb);
2911            else
2912                  dev_kfree_skb_any(skb);
2913           return 0;
2914         }
2915         if ((unsigned long)skb->data & 3) {
2916            printk("Misaligned SKB\n");
2917            if (vcc->pop)
2918                  vcc->pop(vcc, skb);
2919            else
2920                  dev_kfree_skb_any(skb);
2921            return 0;
2922         }       
2923         /* Get a descriptor number from our free descriptor queue.
2924            The descriptor number comes from the TCQ, which is used here
2925            as a free-buffer queue: initially the TCQ is loaded with all
2926            the descriptors and is therefore full.
2927         */
2928         desc = get_desc (iadev, iavcc);
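        /* 0xffff from get_desc() means no free TX descriptor is available;
           returning non-zero lets ia_send() queue the skb on tx_backlog. */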
2929         if (desc == 0xffff) 
2930             return 1;
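        /* A TCQ entry packs a 3-bit completion code in bits 15:13 and the
           descriptor number in bits 12:0. */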
2931         comp_code = desc >> 13;  
2932         desc &= 0x1fff;  
2933   
2934         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2935         {  
2936                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2937                 atomic_inc(&vcc->stats->tx);
2938                 if (vcc->pop)   
2939                     vcc->pop(vcc, skb);   
2940                 else  
2941                     dev_kfree_skb_any(skb);
2942                 return 0;   /* return SUCCESS */
2943         }  
2944   
2945         if (comp_code)  
2946         {  
2947             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2948                                                             desc, comp_code);)  
2949         }  
2950        
2951         /* remember the desc and vcc mapping */
2952         iavcc->vc_desc_cnt++;
2953         iadev->desc_tbl[desc-1].iavcc = iavcc;
2954         iadev->desc_tbl[desc-1].txskb = skb;
2955         IA_SKB_STATE(skb) = 0;
2956
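        /* Consume the TCQ entry just read: advance the 16-bit read pointer
           (with wrap) and report the new position back to the SAR. */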
2957         iadev->ffL.tcq_rd += 2;
2958         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2959                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2960         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2961   
2962         /* Put the descriptor number in the packet ready queue  
2963                 and put the updated write pointer in the DLE field   
2964         */   
2965         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2966
2967         iadev->ffL.prq_wr += 2;
2968         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2969                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2970           
2971         /* Figure out the exact length of the packet and padding required to 
2972            make it  aligned on a 48 byte boundary.  */
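        /* 48 bytes is the ATM cell payload size: the AAL5 CS-PDU (data plus
           the 8-byte CPCS trailer) must fill an integral number of cells. */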
2973         total_len = skb->len + sizeof(struct cpcs_trailer);  
2974         total_len = ((total_len + 47) / 48) * 48;
2975         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2976  
2977         /* Put the packet in a tx buffer */   
2978         trailer = iadev->tx_buf[desc-1].cpcs;
2979         IF_TX(printk("Sent: skb = %p skb->data: %p len: %d, desc: %d\n",
2980                   skb, skb->data, skb->len, desc);)
2981         trailer->control = 0; 
2982         /*big endian*/ 
2983         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2984         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2985
2986         /* Display the packet */  
2987         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2988                                                         skb->len, tcnter++);  
2989         xdump(skb->data, skb->len, "TX: ");
2990         printk("\n");)
2991
2992         /* Build the buffer descriptor */  
2993         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2994         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2995         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2996         /* Huh ? p.115 of users guide describes this as a read-only register */
2997         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2998         buf_desc_ptr->vc_index = vcc->vci;
2999         buf_desc_ptr->bytes = total_len;  
3000
3001         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3002            clear_lockup (vcc, iadev);
3003
3004         /* Build the DLE structure */  
3005         wr_ptr = iadev->tx_dle_q.write;  
3006         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3007         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3008                 skb->len, PCI_DMA_TODEVICE);
3009         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3010                                                   buf_desc_ptr->buf_start_lo;  
3011         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3012         wr_ptr->bytes = skb->len;  
3013
3014         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3015         if ((wr_ptr->bytes >> 2) == 0xb)
3016            wr_ptr->bytes = 0x30;
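        /* (bytes >> 2) == 0xb covers lengths 0x2c-0x2f; bumping them to 0x30
           sidesteps the lockup.  Assumption: the few extra bytes DMAed past
           skb->len are benign, since the adapter-side buffer is padded to a
           48-byte multiple anyway. */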
3017
3018         wr_ptr->mode = TX_DLE_PSI; 
3019         wr_ptr->prq_wr_ptr_data = 0;
3020   
3021         /* end is not to be used for the DLE q */  
3022         if (++wr_ptr == iadev->tx_dle_q.end)  
3023                 wr_ptr = iadev->tx_dle_q.start;  
3024         
3025         /* Build trailer dle */
3026         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3027         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3028           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3029
3030         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3031         wr_ptr->mode = DMA_INT_ENABLE; 
3032         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
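        /* Only this final (trailer) DLE carries the updated PRQ write pointer
           and requests a completion interrupt, so the SAR is presumably only
           kicked once both DLEs of the packet are in place. */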
3033         
3034         /* end is not to be used for the DLE q */
3035         if (++wr_ptr == iadev->tx_dle_q.end)  
3036                 wr_ptr = iadev->tx_dle_q.start;
3037
3038         iadev->tx_dle_q.write = wr_ptr;  
3039         ATM_DESC(skb) = vcc->vci;
3040         skb_queue_tail(&iadev->tx_dma_q, skb);
3041
3042         atomic_inc(&vcc->stats->tx);
3043         iadev->tx_pkt_cnt++;
3044         /* Increment transaction counter */  
3045         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3046         
3047 #if 0        
3048         /* add flow control logic */ 
3049         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3050           if (iavcc->vc_desc_cnt > 10) {
3051              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3052             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3053               iavcc->flow_inc = -1;
3054               iavcc->saved_tx_quota = vcc->tx_quota;
3055            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3056              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3057              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3058               iavcc->flow_inc = 0;
3059            }
3060         }
3061 #endif
3062         IF_TX(printk("ia send done\n");)  
3063         return 0;  
3064 }  
3065
3066 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3067 {
3068         IADEV *iadev; 
3069         struct ia_vcc *iavcc;
3070         unsigned long flags;
3071
3072         iadev = INPH_IA_DEV(vcc->dev);
3073         iavcc = INPH_IA_VCC(vcc); 
3074         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3075         {
3076             if (!skb)
3077                 printk(KERN_CRIT "null skb in ia_send\n");
3078             else dev_kfree_skb_any(skb);
3079             return -EINVAL;
3080         }                         
3081         spin_lock_irqsave(&iadev->tx_lock, flags); 
3082         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3083             dev_kfree_skb_any(skb);
3084             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3085             return -EINVAL; 
3086         }
3087         ATM_SKB(skb)->vcc = vcc;
3088  
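        /* Preserve FIFO order: if a backlog already exists, or ia_pkt_tx()
           cannot get a free descriptor, the skb is queued on tx_backlog,
           which is presumably drained later from the TX completion path. */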
3089         if (skb_peek(&iadev->tx_backlog)) {
3090            skb_queue_tail(&iadev->tx_backlog, skb);
3091         }
3092         else {
3093            if (ia_pkt_tx (vcc, skb)) {
3094               skb_queue_tail(&iadev->tx_backlog, skb);
3095            }
3096         }
3097         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3098         return 0;
3099
3100 }
3101
3102 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3103 {
3104   int   left = *pos, n;   
3105   char  *tmpPtr;
3106   IADEV *iadev = INPH_IA_DEV(dev);
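  /* proc_read convention: called repeatedly with an increasing *pos; each
     call emits one chunk of output and a return of 0 ends the listing. */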
3107   if(!left--) {
3108      if (iadev->phy_type == FE_25MBIT_PHY) {
3109        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3110        return n;
3111      }
3112      if (iadev->phy_type == FE_DS3_PHY)
3113         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3114      else if (iadev->phy_type == FE_E3_PHY)
3115         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3116      else if (iadev->phy_type == FE_UTP_OPTION)
3117          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3118      else
3119         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3120      tmpPtr = page + n;
3121      if (iadev->pci_map_size == 0x40000)
3122         n += sprintf(tmpPtr, "-1KVC-");
3123      else
3124         n += sprintf(tmpPtr, "-4KVC-");  
3125      tmpPtr = page + n; 
3126      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3127         n += sprintf(tmpPtr, "1M  \n");
3128      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3129         n += sprintf(tmpPtr, "512K\n");
3130      else
3131        n += sprintf(tmpPtr, "128K\n");
3132      return n;
3133   }
3134   if (!left) {
3135      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3136                            "  Size of Tx Buffer  :  %u\n"
3137                            "  Number of Rx Buffer:  %u\n"
3138                            "  Size of Rx Buffer  :  %u\n"
3139                            "  Packets Received   :  %u\n"
3140                            "  Packets Transmitted:  %u\n"
3141                            "  Cells Received     :  %u\n"
3142                            "  Cells Transmitted  :  %u\n"
3143                            "  Board Dropped Cells:  %u\n"
3144                            "  Board Dropped Pkts :  %u\n",
3145                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3146                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3147                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3148                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3149                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3150   }
3151   return 0;
3152 }
3153   
3154 static const struct atmdev_ops ops = {  
3155         .open           = ia_open,  
3156         .close          = ia_close,  
3157         .ioctl          = ia_ioctl,  
3158         .getsockopt     = ia_getsockopt,  
3159         .setsockopt     = ia_setsockopt,  
3160         .send           = ia_send,  
3161         .phy_put        = ia_phy_put,  
3162         .phy_get        = ia_phy_get,  
3163         .change_qos     = ia_change_qos,  
3164         .proc_read      = ia_proc_read,
3165         .owner          = THIS_MODULE,
3166 };  
3167           
3168 static int __devinit ia_init_one(struct pci_dev *pdev,
3169                                  const struct pci_device_id *ent)
3170 {  
3171         struct atm_dev *dev;  
3172         IADEV *iadev;  
3173         unsigned long flags;
3174         int ret;
3175
3176         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3177         if (!iadev) {
3178                 ret = -ENOMEM;
3179                 goto err_out;
3180         }
3181
3182         iadev->pci = pdev;
3183
3184         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3185                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3186         if (pci_enable_device(pdev)) {
3187                 ret = -ENODEV;
3188                 goto err_out_free_iadev;
3189         }
3190         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3191         if (!dev) {
3192                 ret = -ENOMEM;
3193                 goto err_out_disable_dev;
3194         }
3195         dev->dev_data = iadev;
3196         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3197         IF_INIT(printk("dev_id = %p iadev->LineRate = %d \n", dev,
3198                 iadev->LineRate);)
3199
3200         ia_dev[iadev_count] = iadev;
3201         _ia_dev[iadev_count] = dev;
3202         iadev_count++;
3203         spin_lock_init(&iadev->misc_lock);
3204         /* First fixes first. I don't want to think about this now. */
3205         spin_lock_irqsave(&iadev->misc_lock, flags); 
3206         if (ia_init(dev) || ia_start(dev)) {  
3207                 IF_INIT(printk("IA register failed!\n");)
3208                 iadev_count--;
3209                 ia_dev[iadev_count] = NULL;
3210                 _ia_dev[iadev_count] = NULL;
3211                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3212                 ret = -EINVAL;
3213                 goto err_out_deregister_dev;
3214         }
3215         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3216         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3217
3218         iadev->next_board = ia_boards;  
3219         ia_boards = dev;  
3220
3221         pci_set_drvdata(pdev, dev);
3222
3223         return 0;
3224
3225 err_out_deregister_dev:
3226         atm_dev_deregister(dev);  
3227 err_out_disable_dev:
3228         pci_disable_device(pdev);
3229 err_out_free_iadev:
3230         kfree(iadev);
3231 err_out:
3232         return ret;
3233 }
3234
3235 static void __devexit ia_remove_one(struct pci_dev *pdev)
3236 {
3237         struct atm_dev *dev = pci_get_drvdata(pdev);
3238         IADEV *iadev = INPH_IA_DEV(dev);
3239
3240         ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10); 
3241         udelay(1);
3242
3243         /* De-register device */  
3244         free_irq(iadev->irq, dev);
3245         iadev_count--;
3246         ia_dev[iadev_count] = NULL;
3247         _ia_dev[iadev_count] = NULL;
3248         IF_EVENT(printk("deregistering ia at (itf:%d)\n", dev->number);)
3249         atm_dev_deregister(dev);
3250
3251         iounmap(iadev->base);  
3252         pci_disable_device(pdev);
3253
3254         ia_free_rx(iadev);
3255         ia_free_tx(iadev);
3256
3257         kfree(iadev);
3258 }
3259
3260 static struct pci_device_id ia_pci_tbl[] = {
3261         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3262         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3263         { 0,}
3264 };
3265 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3266
3267 static struct pci_driver ia_driver = {
3268         .name =         DEV_LABEL,
3269         .id_table =     ia_pci_tbl,
3270         .probe =        ia_init_one,
3271         .remove =       __devexit_p(ia_remove_one),
3272 };
3273
3274 static int __init ia_module_init(void)
3275 {
3276         int ret;
3277
3278         ret = pci_register_driver(&ia_driver);
3279         if (ret >= 0) {
3280                 ia_timer.expires = jiffies + 3*HZ;
3281                 add_timer(&ia_timer); 
3282         } else
3283                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3284         return ret;
3285 }
3286
3287 static void __exit ia_module_exit(void)
3288 {
3289         pci_unregister_driver(&ia_driver);
3290
3291         del_timer(&ia_timer);
3292 }
3293
3294 module_init(ia_module_init);
3295 module_exit(ia_module_exit);