1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
7 *******************************************************************************
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The followings are the change log and history:
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
41 *******************************************************************************/
43 #include <linux/module.h>
44 #include <linux/kernel.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/sonet.h>
51 #include <linux/skbuff.h>
52 #include <linux/time.h>
53 #include <linux/delay.h>
54 #include <linux/uio.h>
55 #include <linux/init.h>
56 #include <linux/wait.h>
57 #include <asm/system.h>
59 #include <asm/atomic.h>
60 #include <asm/uaccess.h>
61 #include <asm/string.h>
62 #include <asm/byteorder.h>
63 #include <linux/vmalloc.h>
64 #include <linux/jiffies.h>
/*
 * swap - byte-swap a 16-bit value.
 *
 * Fix: the original macro used the argument unparenthesized (x & 0xff),
 * which mis-expands for any argument built with an operator of lower
 * precedence than '&' (e.g. a conditional expression).  Parenthesizing
 * every use of the argument is interface-preserving for all callers.
 * NOTE(review): the argument is still evaluated twice, as in the
 * original; avoid side-effecting arguments.
 */
#define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
/* Accessor for the SUNI PHY private data hung off an atm_dev. */
69 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
72 static void desc_dbg(IADEV *iadev);
/* Up to 8 adapters are supported; ia_dev/_ia_dev are parallel tables
 * indexed by discovery order, iadev_count is the number registered. */
74 static IADEV *ia_dev[8];
75 static struct atm_dev *_ia_dev[8];
76 static int iadev_count;
77 static void ia_led_timer(unsigned long arg);
78 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
/* Buffer-count/size tuning; overridable as module parameters below. */
79 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
80 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
81 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
82             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
84 module_param(IA_TX_BUF, int, 0);
85 module_param(IA_TX_BUF_SZ, int, 0);
86 module_param(IA_RX_BUF, int, 0);
87 module_param(IA_RX_BUF_SZ, int, 0);
88 module_param(IADebugFlag, uint, 0644);
90 MODULE_LICENSE("GPL");
92 #if BITS_PER_LONG != 32
93 # error FIXME: this driver only works on 32-bit platforms
96 /**************************** IA_LIB **********************************/
/* Reset a tx-return queue to the empty state (body elided in this
 * listing; presumably clears next/tail — TODO confirm in full source). */
98 static void ia_init_rtn_q (IARTN_Q *que)
/* Push an existing node onto the HEAD of the tx-return queue (used to
 * re-queue an entry that could not be processed yet).  No allocation. */
104 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
107    if (que->next == NULL)
108       que->next = que->tail = data;
110       data->next = que->next;
/* Append a descriptor-table snapshot to the TAIL of the tx-return queue.
 * Allocates the node with GFP_ATOMIC (callable from IRQ context).
 * Returns 0 on success, -1 if the allocation fails. */
116 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
117    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
118    if (!entry) return -1;
121    if (que->next == NULL)
122       que->next = que->tail = entry;
124       que->tail->next = entry;
125       que->tail = que->tail->next;
/* Pop the head node from the tx-return queue; NULL when empty.
 * Caller owns (and must free) the returned node. */
130 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
132    if (que->next == NULL)
135    if ( que->next == que->tail)
136       que->next = que->tail = NULL;
138       que->next = que->next->next;
/*
 * ia_hack_tcq - drain the hardware transmit-complete queue (TCQ).
 * Walks seg_ram from host_tcq_wr up to the hardware write pointer,
 * retiring each completed descriptor: decrements the per-VC in-flight
 * count, clears the desc_tbl timestamp, and for rate-limited VCs queues
 * the entry on tx_return_q for deferred skb free.  Entries advance by 2
 * because TCQ slots are 16-bit.
 */
142 static void ia_hack_tcq(IADEV *dev) {
146   struct ia_vcc *iavcc_r = NULL;
148   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
149   while (dev->host_tcq_wr != tcq_wr) {
150      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
152      else if (!dev->desc_tbl[desc1 -1].timestamp) {
153         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
154         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
156      else if (dev->desc_tbl[desc1 -1].timestamp) {
157         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
158            printk("IA: Fatal err in get_desc\n");
161         iavcc_r->vc_desc_cnt--;
162         dev->desc_tbl[desc1 -1].timestamp = 0;
163         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
164                                 (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
/* Slow (rate-limited) VCs go through the return queue so the skb is
 * freed from ia_tx_poll rather than here. */
165         if (iavcc_r->pcr < dev->rate_limit) {
166            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
167            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
168               printk("ia_hack_tcq: No memory available\n");
170         dev->desc_tbl[desc1 -1].iavcc = NULL;
171         dev->desc_tbl[desc1 -1].txskb = NULL;
/* Advance our shadow write pointer, wrapping at the TCQ end. */
173      dev->host_tcq_wr += 2;
174      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
175         dev->host_tcq_wr = dev->ffL.tcq_st;
/*
 * get_desc - allocate the next free transmit descriptor from the TCQ.
 * Periodically (every ~50 jiffies, or when the queue looks empty) scans
 * desc_tbl for descriptors whose per-VC timeout has expired and force-
 * recycles them back into the TCQ (workaround for stuck descriptors).
 * Returns the 1-based descriptor number; stamps desc_tbl with jiffies.
 */
179 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
182   struct ia_vcc *iavcc_r = NULL;
184   static unsigned long timer = 0;
/* Recovery scan: rate-limited by 'timer', also triggered when rd==wr. */
188   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
191      while (i < dev->num_tx_desc) {
192         if (!dev->desc_tbl[i].timestamp) {
196         ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
197         delta = jiffies - dev->desc_tbl[i].timestamp;
198         if (delta >= ltimeout) {
199            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
/* Step the read pointer back one slot (with wrap) and write the stale
 * descriptor number there so it becomes allocatable again. */
200            if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
201               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
203               dev->ffL.tcq_rd -= 2;
204            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
205            if (!(skb = dev->desc_tbl[i].txskb) ||
206                          !(iavcc_r = dev->desc_tbl[i].iavcc))
207               printk("Fatal err, desc table vcc or skb is NULL\n");
209               iavcc_r->vc_desc_cnt--;
210            dev->desc_tbl[i].timestamp = 0;
211            dev->desc_tbl[i].iavcc = NULL;
212            dev->desc_tbl[i].txskb = NULL;
217   if (dev->ffL.tcq_rd == dev->host_tcq_wr)
220   /* Get the next available descriptor number from TCQ */
221   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
/* Skip zero slots and descriptors still marked in-flight. */
223   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
224      dev->ffL.tcq_rd += 2;
225      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
226      dev->ffL.tcq_rd = dev->ffL.tcq_st;
227      if (dev->ffL.tcq_rd == dev->host_tcq_wr)
229      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
232   /* get system time */
233   dev->desc_tbl[desc_num -1].timestamp = jiffies;
/*
 * clear_lockup - hardware-lockup workaround for ABR VCs.
 * Samples the on-board VC state over several calls (vcstatus->cnt); if
 * the segmentation engine appears frozen (same cell slot and fraction
 * as last sample while in ABR_STATE), forces the VC back to idle and
 * re-inserts its VCI into the ABR schedule table, then re-enables the
 * segmentation engine.  Applies to ABR traffic class only.
 */
237 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
239   vcstatus_t *vcstatus;
241   u_short tempCellSlot, tempFract;
242   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
243   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
246   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
247      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
/* Only check every 5th invocation (cnt wraps at 0x05 — presumably
 * reset elsewhere; elided here). */
250      if( vcstatus->cnt == 0x05 ) {
253         if( eabr_vc->last_desc ) {
254            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
255               /* Wait for 10 Micro sec */
257               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
/* Compare progress indicators against the previous sample; equality
 * on both means the engine has not advanced => lockup. */
261               tempCellSlot = abr_vc->last_cell_slot;
262               tempFract    = abr_vc->fraction;
263               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
264                         && (tempFract == dev->testTable[vcc->vci]->fract))
266               dev->testTable[vcc->vci]->lastTime = tempCellSlot;
267               dev->testTable[vcc->vci]->fract = tempFract;
269         } /* last descriptor */
271   } /* vcstatus->cnt */
274      IF_ABR(printk("LOCK UP found\n");)
275      writew(0xFFFD, dev->seg_reg+MODE_REG_0);
276      /* Wait for 10 Micro sec */
/* Force the VC state machine to idle, then find a free schedule slot
 * and re-insert this VCI. */
278      abr_vc->status &= 0xFFF8;
279      abr_vc->status |= 0x0001;  /* state is idle */
280      shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
281      for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
283         shd_tbl[i] = vcc->vci;
285         IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
286      writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
287      writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
288      writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
298 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
300 ** +----+----+------------------+-------------------------------+
301 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
302 ** +----+----+------------------+-------------------------------+
304 ** R = reserved (written as 0)
305 ** NZ = 0 if 0 cells/sec; 1 otherwise
307 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
/* Convert a 24-bit cell rate (cells/sec) to the adapter's 16-bit
 * floating-point format described above: NZ flag, 5-bit exponent,
 * 9-bit mantissa.  The three assignments cover i == M_BITS, i < M_BITS
 * and i > M_BITS respectively (i = position of the highest set bit;
 * the search loop is elided in this listing). */
310 cellrate_to_float(u32 cr)
314 #define M_BITS	9		/* Number of bits in mantissa */
315 #define E_BITS	5		/* Number of bits in exponent */
319   u32 tmp = cr & 0x00ffffff;
328      flot = NZ | (i << M_BITS) | (cr & M_MASK);
330      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
332      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
338 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
/* Inverse of cellrate_to_float: expand the 16-bit hardware float back
 * to a cells/sec value.  Zero if the NZ bit is clear; otherwise restore
 * the implicit leading 1 of the mantissa and shift by the exponent. */
341 float_to_cellrate(u16 rate)
343   u32   exp, mantissa, cps;
344   if ((rate & NZ) == 0)
346   exp = (rate >> M_BITS) & E_MASK;
347   mantissa = rate & M_MASK;
350   cps = (1 << M_BITS) | mantissa;
353   else if (exp > M_BITS)
354      cps <<= (exp - M_BITS);
356      cps >>= (M_BITS - exp);
/* Fill an ABR service-class parameter block with driver defaults:
 * PCR = line rate, fixed ICR and TBE (remaining fields elided here). */
361 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
362   srv_p->class_type = ATM_ABR;
363   srv_p->pcr  = dev->LineRate;
365   srv_p->icr  = 0x055cb7;
366   srv_p->tbe  = 0xffffff;
/*
 * ia_open_abr_vc - program an ABR VC into the adapter's VC tables.
 * flag == 1: initialize the forward (FFRED/segmentation) entry —
 * validates/clamps the service parameters and converts them to the
 * hardware's fixed/floating formats.  flag == 0: initialize the reverse
 * (RFRED/reassembly) entry and account the MCR.  VC index is vcc->vci.
 */
377 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
378                 struct atm_vcc *vcc, u8 flag)
380    f_vc_abr_entry  *f_abr_vc;
381    r_vc_abr_entry  *r_abr_vc;
384    u16 adtf, air, *ptr16;
385    f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
386    f_abr_vc += vcc->vci;
388       case 1: /* FFRED initialization */
389 #if 0  /* sanity check */
/* Parameter validation/clamping against line rate and table limits. */
392          if (srv_p->pcr > dev->LineRate)
393             srv_p->pcr = dev->LineRate;
394          if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
395	     return MCR_UNAVAILABLE;
396          if (srv_p->mcr > srv_p->pcr)
399             srv_p->icr = srv_p->pcr;
400          if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
402          if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
404          if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
406          if (srv_p->nrm > MAX_NRM)
408          if (srv_p->trm > MAX_TRM)
410          if (srv_p->adtf > MAX_ADTF)
412          else if (srv_p->adtf == 0)
414          if (srv_p->cdf > MAX_CDF)
416          if (srv_p->rif > MAX_RIF)
418          if (srv_p->rdf > MAX_RDF)
/* Convert validated parameters into the hardware VC-table encoding. */
421          memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
422          f_abr_vc->f_vc_type = ABR;
423          nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
424                                     /* i.e 2**n = 2 << (n-1) */
425          f_abr_vc->f_nrm = nrm << 8 | nrm;
426          trm = 100000/(2 << (16 - srv_p->trm));
427          if ( trm == 0) trm = 1;
428          f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
429          crm = srv_p->tbe / nrm;
430          if (crm == 0) crm = 1;
431          f_abr_vc->f_crm = crm & 0xff;
432          f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
433          icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
434				((srv_p->tbe/srv_p->frtt)*1000000) :
435				(1000000/(srv_p->frtt/srv_p->tbe)));
436          f_abr_vc->f_icr = cellrate_to_float(icr);
437          adtf = (10000 * srv_p->adtf)/8192;
438          if (adtf == 0) adtf = 1;
439          f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
440          f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
441          f_abr_vc->f_acr = f_abr_vc->f_icr;
442          f_abr_vc->f_status = 0x0042;
444       case 0: /* RFRED initialization */
445	  ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
446          *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
447	  r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
448           r_abr_vc += vcc->vci;
449	  r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
450           air = srv_p->pcr << (15 - srv_p->rif);
451           if (air == 0) air = 1;
452           r_abr_vc->r_air = cellrate_to_float(air);
453           dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
454           dev->sum_mcr	   += srv_p->mcr;
/*
 * ia_cbr_setup - reserve CBR schedule-table slots for a VC's PCR.
 * Computes the number of table entries needed for the requested rate
 * (granularity-rounded), verifies enough entries remain, then spreads
 * the VCI as evenly as possible through the CBR schedule table,
 * probing outward (+/- inc) from each ideal slot until an empty slot
 * is found.  Enables CBR scheduling on the first active CBR VC.
 */
462 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
463    u32 rateLow=0, rateHigh, rate;
465    struct ia_vcc *ia_vcc;
467    int   idealSlot =0, testSlot, toBeAssigned, inc;
469    u16  *SchedTbl, *TstSchedTbl;
475    /* IpAdjustTrafficParams */
476    if (vcc->qos.txtp.max_pcr <= 0) {
477       IF_ERR(printk("PCR for CBR not defined\n");)
480    rate = vcc->qos.txtp.max_pcr;
481    entries = rate / dev->Granularity;
482    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
483                                 entries, rate, dev->Granularity);)
485       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
/* Round up to the next entry if the requested rate is closer to it
 * (3:1 weighting toward rounding up). */
486    rateLow  =  entries * dev->Granularity;
487    rateHigh = (entries + 1) * dev->Granularity;
488    if (3*(rate - rateLow) > (rateHigh - rate))
490    if (entries > dev->CbrRemEntries) {
491       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
492       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
493                                          entries, dev->CbrRemEntries);)
497    ia_vcc = INPH_IA_VCC(vcc);
498    ia_vcc->NumCbrEntry = entries;
499    dev->sum_mcr += entries * dev->Granularity;
500    /* IaFFrednInsertCbrSched */
501    // Starting at an arbitrary location, place the entries into the table
502    // as smoothly as possible
504    spacing = dev->CbrTotEntries / entries;
505    sp_mod = dev->CbrTotEntries % entries; // get modulo
506    toBeAssigned = entries;
509    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
512       // If this is the first time, start the table loading for this connection
513       // as close to entryPoint as possible.
514       if (toBeAssigned == entries)
516          idealSlot = dev->CbrEntryPt;
517          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
518          if (dev->CbrEntryPt >= dev->CbrTotEntries)
519             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
521          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
522          // in the table that would be  smoothest
523          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
524          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
526       if (idealSlot >= (int)dev->CbrTotEntries)
527          idealSlot -= dev->CbrTotEntries;
528       // Continuously check around this ideal value until a null
529       // location is encountered.
530       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
532       testSlot = idealSlot;
533       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
534       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
535                 testSlot, (u32)TstSchedTbl,toBeAssigned);)
536       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
537       while (cbrVC)  // If another VC at this location, we have to keep looking
/* Probe below the ideal slot (wrap to end when negative)... */
540           testSlot = idealSlot - inc;
541           if (testSlot < 0) { // Wrap if necessary
542              testSlot += dev->CbrTotEntries;
543              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
544                                                       (u32)SchedTbl,testSlot);)
546           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
547           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
/* ...then probe above it (wrap to start when past the end). */
550              testSlot = idealSlot + inc;
551              if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
552                 testSlot -= dev->CbrTotEntries;
553                 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
554                 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
555                                            testSlot, toBeAssigned);)
557              // set table index and read in value
558              TstSchedTbl = (u16*)(SchedTbl + testSlot);
559              IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
560                              (u32)TstSchedTbl,cbrVC,inc);)
561              memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
563        // Move this VCI number into this location of the CBR Sched table.
564        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
565        dev->CbrRemEntries--;
569    /* IaFFrednCbrEnable */
570    dev->NumEnabledCBR++;
571    if (dev->NumEnabledCBR == 1) {
572        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
573        IF_CBR(printk("CBR is enabled\n");)
/* Release a CBR VC: scrub its VCI from every CBR schedule-table slot
 * (returning those entries to CbrRemEntries) and disable CBR scheduling
 * in STPARMS when the last CBR VC closes. */
577 static void ia_cbrVc_close (struct atm_vcc *vcc) {
579    u16 *SchedTbl, NullVci = 0;
582    iadev = INPH_IA_DEV(vcc->dev);
583    iadev->NumEnabledCBR--;
584    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
585    if (iadev->NumEnabledCBR == 0) {
586       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
587       IF_CBR (printk("CBR support disabled\n");)
590    for (i=0; i < iadev->CbrTotEntries; i++)
592       if (*SchedTbl == vcc->vci) {
593          iadev->CbrRemEntries++;
599    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
/* Number of free transmit descriptors: distance between the TCQ read
 * pointer and our shadow write pointer, in 16-bit slots (/2), handling
 * the wrapped case. */
602 static int ia_avail_descs(IADEV *iadev) {
605    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
606       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
608       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
609                   iadev->ffL.tcq_st) / 2;
613 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
/*
 * ia_que_tx - retransmit the backlog queue while descriptors remain.
 * Dequeues skbs from tx_backlog, dropping any with a NULL or closed
 * VCC; if ia_pkt_tx fails the skb is pushed back to the queue head.
 */
615 static int ia_que_tx (IADEV *iadev) {
619    struct ia_vcc *iavcc;
620    num_desc = ia_avail_descs(iadev);
622    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
623       if (!(vcc = ATM_SKB(skb)->vcc)) {
624          dev_kfree_skb_any(skb);
625          printk("ia_que_tx: Null vcc\n");
628       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
629          dev_kfree_skb_any(skb);
630          printk("Free the SKB on closed vci %d \n", vcc->vci);
633       iavcc = INPH_IA_VCC(vcc);
634       if (ia_pkt_tx (vcc, skb)) {
635          skb_queue_head(&iadev->tx_backlog, skb);
/*
 * ia_tx_poll - complete transmitted skbs queued on tx_return_q.
 * For each returned entry, pops skbs from the VC's txing_skb list up to
 * the matching one, invoking vcc->pop (or freeing) for each completed
 * skb.  If the matching skb is not found the entry is re-queued at the
 * head of tx_return_q for a later pass.
 */
642 static void ia_tx_poll (IADEV *iadev) {
643    struct atm_vcc *vcc = NULL;
644    struct sk_buff *skb = NULL, *skb1 = NULL;
645    struct ia_vcc *iavcc;
649    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
650        skb = rtne->data.txskb;
652           printk("ia_tx_poll: skb is null\n");
655        vcc = ATM_SKB(skb)->vcc;
657           printk("ia_tx_poll: vcc is null\n");
658           dev_kfree_skb_any(skb);
662        iavcc = INPH_IA_VCC(vcc);
664           printk("ia_tx_poll: iavcc is null\n");
665           dev_kfree_skb_any(skb);
/* Drain earlier skbs on this VC until we reach the one just completed. */
669        skb1 = skb_dequeue(&iavcc->txing_skb);
670        while (skb1 && (skb1 != skb)) {
671           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
672              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
674           IF_ERR(printk("Release the SKB not match\n");)
675           if ((vcc->pop) && (skb1->len != 0))
678              IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
682              dev_kfree_skb_any(skb1);
683           skb1 = skb_dequeue(&iavcc->txing_skb);
686           IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
687           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
690        if ((vcc->pop) && (skb->len != 0))
693           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
696           dev_kfree_skb_any(skb);
/* Write one 16-bit word to the serial NOVRAM/EEPROM at 'addr':
 * enable writes (EWEN), issue the write command, clock out the 16 data
 * bits MSB first, then disable writes (EWDS). */
704 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
709 	 *  Issue a command to enable writes to the NOVRAM
711 	NVRAM_CMD (EXTEND + EWEN);
714 	 *  issue the write command
716 	NVRAM_CMD(IAWRITE + addr);
718 	 *  Send the data, starting with D15, then D14, and so on for 16 bits
720 	for (i=15; i>=0; i--) {
721 		NVRAM_CLKOUT (val & 0x8000);
726 	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
728 	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
732 	 *  disable writes again
734 	NVRAM_CMD(EXTEND + EWDS)
/* Read one 16-bit word from the serial EEPROM at 'addr': issue the
 * read command, then clock in 16 bits MSB first (bit-shift loop body
 * elided in this listing). */
740 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
746 	 *  Read the first bit that was clocked with the falling edge of the
747 	 *  the last command data clock
749 	NVRAM_CMD(IAREAD + addr);
751 	 *  Now read the rest of the bits, the next bit read is D14, then D13,
755 	for (i=15; i>=0; i--) {
/*
 * ia_hw_type - probe board configuration from EEPROM word 25.
 * Scales the default buffer counts by packet-memory size (1M / 512K /
 * smaller => /1, /2, /8) unless the user overrode them via module
 * parameters, determines the PHY variant from the front-end bits, and
 * derives LineRate in cells/sec for the detected PHY.
 */
764 static void ia_hw_type(IADEV *iadev) {
765    u_short memType = ia_eeprom_get(iadev, 25);
766    iadev->memType = memType;
767    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
768       iadev->num_tx_desc = IA_TX_BUF;
769       iadev->tx_buf_sz = IA_TX_BUF_SZ;
770       iadev->num_rx_desc = IA_RX_BUF;
771       iadev->rx_buf_sz = IA_RX_BUF_SZ;
772    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
773       if (IA_TX_BUF == DFL_TX_BUFFERS)
774         iadev->num_tx_desc = IA_TX_BUF / 2;
776         iadev->num_tx_desc = IA_TX_BUF;
777       iadev->tx_buf_sz = IA_TX_BUF_SZ;
778       if (IA_RX_BUF == DFL_RX_BUFFERS)
779         iadev->num_rx_desc = IA_RX_BUF / 2;
781         iadev->num_rx_desc = IA_RX_BUF;
782       iadev->rx_buf_sz = IA_RX_BUF_SZ;
785       if (IA_TX_BUF == DFL_TX_BUFFERS)
786         iadev->num_tx_desc = IA_TX_BUF / 8;
788         iadev->num_tx_desc = IA_TX_BUF;
789       iadev->tx_buf_sz = IA_TX_BUF_SZ;
790       if (IA_RX_BUF == DFL_RX_BUFFERS)
791         iadev->num_rx_desc = IA_RX_BUF / 8;
793         iadev->num_rx_desc = IA_RX_BUF;
794       iadev->rx_buf_sz = IA_RX_BUF_SZ;
/* RX packet RAM starts immediately after the TX buffer region. */
796    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
797    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
798          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
799          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
802    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
803       iadev->phy_type = PHY_OC3C_S;
804    else if ((memType & FE_MASK) == FE_UTP_OPTION)
805       iadev->phy_type = PHY_UTP155;
807      iadev->phy_type = PHY_OC3C_M;
810    iadev->phy_type = memType & FE_MASK;
811    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
812                                          memType,iadev->phy_type);)
/* Line rates: payload cells/sec = (bit rate / 8 bytes) * 26/27 framing
 * overhead ratio / 53 bytes per cell — TODO confirm derivation. */
813    if (iadev->phy_type == FE_25MBIT_PHY)
814       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
815    else if (iadev->phy_type == FE_DS3_PHY)
816       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
817    else if (iadev->phy_type == FE_E3_PHY)
818       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
820        iadev->LineRate = (u32)(ATM_OC3_PCR);
821    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
/*
 * IaFrontEndIntr - service a front-end (PHY) interrupt.
 * Reads and clears the PHY-specific interrupt status and updates
 * carrier_detect for each supported PHY: MB25 (25 Mbit), PM7345
 * (DS3/E3), or SUNI (OC3); logs carrier up/down transitions.
 */
825 static void IaFrontEndIntr(IADEV *iadev) {
826   volatile IA_SUNI *suni;
827   volatile ia_mb25_t *mb25;
828   volatile suni_pm7345_t *suni_pm7345;
832   if(iadev->phy_type & FE_25MBIT_PHY) {
833      mb25 = (ia_mb25_t*)iadev->phy;
834      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
835   } else if (iadev->phy_type & FE_DS3_PHY) {
836      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
837      /* clear FRMR interrupts */
838      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat;
839      iadev->carrier_detect =
840            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
841   } else if (iadev->phy_type & FE_E3_PHY ) {
842      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
843      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
844      iadev->carrier_detect =
845            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
848      suni = (IA_SUNI *)iadev->phy;
849      intr_status = suni->suni_rsop_status & 0xff;
850      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
852   if (iadev->carrier_detect)
853      printk("IA: SUNI carrier detected\n");
855      printk("IA: SUNI carrier lost signal\n");
/* Initialize the MB25 25 Mbit PHY: program the master control register
 * (discard-on-error bits, optionally enabled), clear diagnostics, and
 * sample the initial carrier state from the interrupt status register. */
859 static void ia_mb25_init (IADEV *iadev)
861    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
863    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
865    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
866    mb25->mb25_diag_control = 0;
868     * Initialize carrier detect state
870    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
/*
 * ia_suni_pm7345_init - bring up the PM7345 S/UNI PHY for DS3 or E3.
 * Programs framer/transmitter configuration for the selected mode,
 * samples initial carrier state, enables the loss-of-signal interrupt,
 * resets counters and test modes, and sets the RX/TX cell-processor
 * idle/cell pattern match registers (magic values are per the PM7345
 * datasheet — TODO confirm against the register reference).
 */
874 static void ia_suni_pm7345_init (IADEV *iadev)
876    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
877    if (iadev->phy_type & FE_DS3_PHY)
879       iadev->carrier_detect =
880          Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
881       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
882       suni_pm7345->suni_ds3_frm_cfg = 1;
883       suni_pm7345->suni_ds3_tran_cfg = 1;
884       suni_pm7345->suni_config = 0;
885       suni_pm7345->suni_splr_cfg = 0;
886       suni_pm7345->suni_splt_cfg = 0;
890       iadev->carrier_detect =
891          Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
892       suni_pm7345->suni_e3_frm_fram_options = 0x4;
893       suni_pm7345->suni_e3_frm_maint_options = 0x20;
894       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
895       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
896       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
897       suni_pm7345->suni_e3_tran_fram_options = 0x1;
898       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
899       suni_pm7345->suni_splr_cfg = 0x41;
900       suni_pm7345->suni_splt_cfg = 0x41;
903     * Enable RSOP loss of signal interrupt.
905    suni_pm7345->suni_intr_enbl = 0x28;
908     *  Clear error counters
910    suni_pm7345->suni_id_reset = 0;
913     *  Clear "PMCTST" in master test register.
915    suni_pm7345->suni_master_test = 0;
917    suni_pm7345->suni_rxcp_ctrl = 0x2c;
918    suni_pm7345->suni_rxcp_fctrl = 0x81;
920    suni_pm7345->suni_rxcp_idle_pat_h1 =
921    	suni_pm7345->suni_rxcp_idle_pat_h2 =
922    	suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
923    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
925    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
926    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
927    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
928    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
930    suni_pm7345->suni_rxcp_cell_pat_h1 =
931    	suni_pm7345->suni_rxcp_cell_pat_h2 =
932    	suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
933    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
935    suni_pm7345->suni_rxcp_cell_mask_h1 =
936    	suni_pm7345->suni_rxcp_cell_mask_h2 =
937    	suni_pm7345->suni_rxcp_cell_mask_h3 =
938    	suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
940    suni_pm7345->suni_txcp_ctrl = 0xa4;
941    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
942    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
944    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
949    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
950 #endif /* __SNMP__ */
955 /***************************** IA_LIB END *****************************/
957 #ifdef CONFIG_ATM_IA_DEBUG
958 static int tcnter = 0;
/* Debug-only hex/ASCII dump of 'length' bytes at 'cp', 16 bytes per
 * line, each line prefixed with 'prefix'; non-printable bytes shown
 * as '.' in the ASCII column. */
959 static void xdump( u_char*  cp, int  length, char*  prefix )
963     u_char*   pBuf = prntBuf;
965     while(count < length){
966         pBuf += sprintf( pBuf, "%s", prefix );
967         for(col = 0;count + col < length && col < 16; col++){
968             if (col != 0 && (col % 4) == 0)
969                 pBuf += sprintf( pBuf, " " );
970             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
972         while(col++ < 16){      /* pad end of buffer with blanks */
974                 sprintf( pBuf, " " );
975             pBuf += sprintf( pBuf, "   " );
977         pBuf += sprintf( pBuf, "  " );
978         for(col = 0;count + col < length && col < 16; col++){
979             if (isprint((int)cp[count + col]))
980                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
982                 pBuf += sprintf( pBuf, "." );
984         sprintf( pBuf, "\n" );
991 }  /* close xdump(... */
992 #endif /* CONFIG_ATM_IA_DEBUG */
/* Linked list of registered boards. */
995 static struct atm_dev *ia_boards = NULL;
/* The control-memory base addresses scale with total on-board memory
 * (in 128K units); these macros compute the effective bases for the
 * current board ('iadev' must be in scope at the use site). */
997 #define ACTUAL_RAM_BASE \
998 	RAM_BASE*((iadev->mem)/(128 * 1024))
999 #define ACTUAL_SEG_RAM_BASE \
1000 	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1001 #define ACTUAL_REASS_RAM_BASE \
1002 	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1005 /*-- some utilities and memory allocation stuff will come here -------------*/
/* Debug helper: dump the TCQ pointers, walk the TCQ contents, and print
 * the timestamp of every entry in the descriptor table. */
1007 static void desc_dbg(IADEV *iadev) {
1009   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1012   // regval = readl((u32)ia_cmds->maddr);
1013   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1014   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1015                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1016                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1017   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
1019   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1020   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1021   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1023   while (tcq_st_ptr != tcq_ed_ptr) {
1024       tmp = iadev->seg_ram+tcq_st_ptr;
1025       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1028   for(i=0; i <iadev->num_tx_desc; i++)
1029       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1033 /*----------------------------- Receiving side stuff --------------------------*/
/* Drain the reassembly exception queue.  The whole body is compiled
 * out (#if 0) because closing the receive side caused excessive
 * exception interrupts; kept for reference. */
1035 static void rx_excp_rcvd(struct atm_dev *dev)
1037 #if 0 /* closing the receiving size will cause too many excp int */
1040   u_short excpq_rd_ptr;
1043   iadev = INPH_IA_DEV(dev);
1044   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1045   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
1046   { printk("state = %x \n", state);
1047         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1048  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
1049         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1050             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1051         // TODO: update exception stat
1052 	vci = readw(iadev->reass_ram+excpq_rd_ptr);
1053 	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1056         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1057             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1058         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1059         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
/* Return a receive descriptor to the hardware free-descriptor queue:
 * write it at fdq_wr, advance (with wrap), and publish the new write
 * pointer to the reassembly engine. */
1064 static void free_desc(struct atm_dev *dev, int desc)
1067 	iadev = INPH_IA_DEV(dev);
1068 	writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1069 	iadev->rfL.fdq_wr +=2;
1070 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1071 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1072 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
/*
 * rx_pkt - process one entry from the packet-complete queue (PCQ).
 * Pops the next descriptor, validates it and its VC, checks the status
 * bits for receive errors, computes the PDU length from the buffer
 * descriptor's DMA/start addresses, allocates+charges an skb, and
 * queues a DMA list entry (DLE) to copy the data from board memory,
 * kicking the RX DMA counter.  Bad descriptors/VCs/PDUs are freed back
 * to the hardware via free_desc().
 */
1076 static int rx_pkt(struct atm_dev *dev)
1079 	struct atm_vcc *vcc;
1080 	unsigned short status;
1081 	struct rx_buf_desc __iomem *buf_desc_ptr;
1085 	struct sk_buff *skb;
1086 	u_int buf_addr, dma_addr;
1088 	iadev = INPH_IA_DEV(dev);
1089 	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1091 	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1094 	/* mask 1st 3 bits to get the actual descno. */
1095 	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1096         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1097                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1098               printk(" pcq_wr_ptr = 0x%x\n",
1099                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
/* Advance the PCQ read pointer (with wrap) and publish it. */
1100 	/* update the read pointer  - maybe we should do this in the end*/
1101 	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1102 	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1104 	    iadev->rfL.pcq_rd += 2;
1105 	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1107 	/* get the buffer desc entry.
1108 		update stuff. - doesn't seem to be any update necessary
1110 	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1111 	/* make the ptr point to the corresponding buffer desc entry */
1112 	buf_desc_ptr += desc;
1113         if (!desc || (desc > iadev->num_rx_desc) ||
1114                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
1115             free_desc(dev, desc);
1116             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1119 	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1122 	   free_desc(dev, desc);
1123 	   printk("IA: null vcc, drop PDU\n");
1128 	/* might want to check the status bits for errors */
1129 	status = (u_short) (buf_desc_ptr->desc_mode);
1130 	if (status & (RX_CER | RX_PTE | RX_OFL))
1132 	        atomic_inc(&vcc->stats->rx_err);
1133 		IF_ERR(printk("IA: bad packet, dropping it");)
1134                 if (status & RX_CER) {
1135                     IF_ERR(printk(" cause: packet CRC error\n");)
1137                 else if (status & RX_PTE) {
1138                     IF_ERR(printk(" cause: packet time out\n");)
1141                     IF_ERR(printk(" cause: buffer over flow\n");)
/* PDU length = distance DMA has advanced from the buffer start. */
1150 	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1151 	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1152 	len = dma_addr - buf_addr;
1153         if (len > iadev->rx_buf_sz) {
1154            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1155            atomic_inc(&vcc->stats->rx_err);
1159         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1161            printk("Drop control packets\n");
1166         ATM_SKB(skb)->vcc = vcc;
1167         ATM_DESC(skb) = desc;
1168 	skb_queue_tail(&iadev->rx_dma_q, skb);
1170 	/* Build the DLE structure */
1171 	wr_ptr = iadev->rx_dle_q.write;
1172 	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1173 		len, PCI_DMA_FROMDEVICE);
1174 	wr_ptr->local_pkt_addr = buf_addr;
1175 	wr_ptr->bytes = len;	/* We don't know this do we ?? */
1176 	wr_ptr->mode = DMA_INT_ENABLE;
1178 	/* shud take care of wrap around here too. */
1179         if(++wr_ptr == iadev->rx_dle_q.end)
1180              wr_ptr = iadev->rx_dle_q.start;
1181 	iadev->rx_dle_q.write = wr_ptr;
1183 	/* Increment transaction counter */
1184 	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
1187       free_desc(dev, desc);
/*
 * rx_intr - reassembly-side interrupt dispatcher.
 * RX_PKT_RCVD: drain the packet-complete queue via rx_pkt() until
 * empty.  RX_FREEQ_EMPT: free-descriptor queue exhausted; if no packet
 * has completed for ~50 jiffies, force-free all descriptors as a
 * recovery measure.  RX_EXCP_RCVD / RX_RAW_RCVD: exception and raw-cell
 * events (handlers largely elided/TODO).
 */
1191 static void rx_intr(struct atm_dev *dev)
1197   iadev = INPH_IA_DEV(dev);
1198   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1199   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1200   if (status & RX_PKT_RCVD)
1203      /* Basically recvd an interrupt for receving a packet.
1204      A descriptor would have been written to the packet complete
1205      queue. Get all the descriptors and set up dma to move the
1206      packets till the packet complete queue is empty..
1208      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1209      IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
1210      while(!(state & PCQ_EMPTY))
1213        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1217   if (status & RX_FREEQ_EMPT)
1220         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1221         iadev->rx_tmp_jif = jiffies;
1224      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1225               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1226         for (i = 1; i <= iadev->num_rx_desc; i++)
1228            printk("Test logic RUN!!!!\n");
1229            writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1232      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
1235   if (status & RX_EXCP_RCVD)
1237      /* probably need to handle the exception queue also. */
1238      IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
1243   if (status & RX_RAW_RCVD)
1245      /* need to handle the raw incoming cells. This depends on
1246      whether we have programmed to receive the raw cells or not.
1248      IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
/*
 * rx_dle_intr() - receive DMA-list-entry (DLE) completion handler.
 *
 * Walks the rx DLE ring from our software read pointer up to the
 * hardware's current position (derived from IPHASE5575_RX_LIST_ADDR).
 * For each completed entry: dequeue the matching skb from rx_dma_q,
 * free its hardware descriptor, unmap the DMA buffer, validate the
 * AAL5 CPCS trailer length, trim the skb to the real PDU length and
 * hand it to the ATM layer (updating vcc rx stats).  Error paths
 * (zero length, NULL vcc, bad trailer) free the skb and return the
 * charged buffer space via atm_return().
 */
1253 static void rx_dle_intr(struct atm_dev *dev)
1256 struct atm_vcc *vcc;
1257 struct sk_buff *skb;
1260 struct dle *dle, *cur_dle;
1263 iadev = INPH_IA_DEV(dev);
1265 /* free all the dles done, that is just update our own dle read pointer
1266 - do we really need to do this. Think not. */
1267 /* DMA is done, just get all the receive buffers from the rx dma queue
1268 and push them up to the higher layer protocol. Also free the desc
1269 associated with the buffer. */
1270 dle = iadev->rx_dle_q.read;
/* Hardware list pointer, masked to an offset within the DLE ring;
   >>4 converts the byte offset into a DLE index (16-byte entries -
   NOTE(review): confirm sizeof(struct dle) == 16). */
1271 dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1272 cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1273 while(dle != cur_dle)
1275 /* free the DMAed skb */
1276 skb = skb_dequeue(&iadev->rx_dma_q);
1279 desc = ATM_DESC(skb);
1280 free_desc(dev, desc);
/* Defensive: a zero-length skb is dropped without touching the vcc. */
1282 if (!(len = skb->len))
1284 printk("rx_dle_intr: skb len 0\n");
1285 dev_kfree_skb_any(skb);
1289 struct cpcs_trailer *trailer;
1291 struct ia_vcc *ia_vcc;
1293 pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1294 len, PCI_DMA_FROMDEVICE);
1295 /* no VCC related housekeeping done as yet. lets see */
1296 vcc = ATM_SKB(skb)->vcc;
1298 printk("IA: null vcc\n");
1299 dev_kfree_skb_any(skb);
1302 ia_vcc = INPH_IA_VCC(vcc);
1305 atomic_inc(&vcc->stats->rx_err);
1306 dev_kfree_skb_any(skb);
/* Give the accounted-for buffer space back to the socket layer. */
1307 atm_return(vcc, atm_guess_pdu2truesize(len));
1310 // get real pkt length pwang_test
/* The AAL5 CPCS trailer sits at the very end of the received data;
   its length field gives the true PDU size (byte-swapped via swap()). */
1311 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1312 skb->len - sizeof(*trailer));
1313 length = swap(trailer->length);
/* Sanity-check the trailer length against both the configured buffer
   size and the actual amount of data received. */
1314 if ((length > iadev->rx_buf_sz) || (length >
1315 (skb->len - sizeof(struct cpcs_trailer))))
1317 atomic_inc(&vcc->stats->rx_err);
1318 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
1320 dev_kfree_skb_any(skb);
1321 atm_return(vcc, atm_guess_pdu2truesize(len));
/* Trim padding + trailer off: skb now carries exactly the PDU. */
1324 skb_trim(skb, length);
1326 /* Display the packet */
1327 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
1328 xdump(skb->data, skb->len, "RX: ");
1331 IF_RX(printk("rx_dle_intr: skb push");)
1333 atomic_inc(&vcc->stats->rx);
1334 iadev->rx_pkt_cnt++;
/* Advance with wrap-around; "end" is one past the last usable entry. */
1337 if (++dle == iadev->rx_dle_q.end)
1338 dle = iadev->rx_dle_q.start;
1340 iadev->rx_dle_q.read = dle;
1342 /* if the interrupts are masked because there were no free desc available,
/* Re-enable rx interrupts now that the free queue is no longer empty. */
1344 if (!iadev->rxing) {
1345 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1346 if (!(state & FREEQ_EMPTY)) {
1347 state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1348 writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1349 iadev->reass_reg+REASS_MASK_REG);
/*
 * open_rx() - set up the receive side of a VC.
 *
 * No-op for ATM_NONE rx traffic class.  ABR is rejected on 25 Mbit PHYs.
 * Otherwise validates this VCI's entry in the on-board RX VC lookup
 * table, programs either the ABR VC machinery (init_abr_vc +
 * ia_open_abr_vc) or the reassembly-table entry (UBR path), and records
 * the vcc in iadev->rx_open[] so rx_pkt() can map incoming PDUs back to
 * their VC.  Returns 0 on success.
 */
1356 static int open_rx(struct atm_vcc *vcc)
1359 u_short __iomem *vc_table;
1360 u_short __iomem *reass_ptr;
1361 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1363 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1364 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit front end cannot do ABR; refuse early. */
1365 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1366 if (iadev->phy_type & FE_25MBIT_PHY) {
1367 printk("IA: ABR not support\n");
1371 /* Make only this VCI in the vc table valid and let all
1372 others be invalid entries */
1373 vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1374 vc_table += vcc->vci;
1375 /* mask the last 6 bits and OR it with 3 for 1K VCs */
/* NOTE(review): comment above mentions OR-with-3 but the store below
   only shifts - the low bits are left 0 here; verify intent. */
1377 *vc_table = vcc->vci << 6;
1378 /* Also keep a list of open rx vcs so that we can attach them with
1379 incoming PDUs later. */
1380 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1381 (vcc->qos.txtp.traffic_class == ATM_ABR))
1383 srv_cls_param_t srv_p;
1384 init_abr_vc(iadev, &srv_p);
/* Final 0 argument: open for receive (1 = transmit in open_tx()). */
1385 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1387 else { /* for UBR later may need to add CBR logic */
1388 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1389 reass_ptr += vcc->vci;
1390 *reass_ptr = NO_AAL5_PKT;
/* Double-open is only warned about, not rejected - the old vcc
   pointer is overwritten below. */
1393 if (iadev->rx_open[vcc->vci])
1394 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1395 vcc->dev->number, vcc->vci);
1396 iadev->rx_open[vcc->vci] = vcc;
/*
 * rx_init() - one-time initialization of the receive (reassembly) side.
 *
 * Allocates the rx DLE ring in coherent DMA memory, resets the
 * reassembly engine, then lays out and programs the receive-side
 * control memory: buffer descriptor table, free-buffer queue (FREEQ),
 * packet-complete queue (PCQ), exception queue, reassembly table,
 * RX VC lookup table and ABR VC table.  Also programs timeout/filter
 * registers, allocates the rx_open[] vcc pointer array, and finally
 * puts the reassembly engine online (R_ONLINE).
 * Returns 0 on success; error paths free the DLE allocation.
 */
1400 static int rx_init(struct atm_dev *dev)
1403 struct rx_buf_desc __iomem *buf_desc_ptr;
1404 unsigned long rx_pkt_start = 0;
1406 struct abr_vc_table *abr_vc_table;
1410 int i,j, vcsize_sel;
1411 u_short freeq_st_adr;
1412 u_short *freeq_start;
1414 iadev = INPH_IA_DEV(dev);
1415 // spin_lock_init(&iadev->rx_lock);
1417 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1418 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1419 &iadev->rx_dle_dma);
1421 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Software ring pointers all start at the base of the allocation. */
1424 iadev->rx_dle_q.start = (struct dle*)dle_addr;
1425 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1426 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1427 iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1428 /* the end of the dle q points to the entry after the last
1429 DLE that can be used. */
1431 /* write the upper 20 bits of the start address to rx list address register */
1432 writel(iadev->rx_dle_dma & 0xfffff000,
1433 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1434 IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
1435 (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
1436 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1437 printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
1438 (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
1439 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
/* Mask all reassembly interrupts, clear the mode, and reset the engine
   before programming any of the control memory below. */
1441 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1442 writew(0, iadev->reass_reg+MODE_REG);
1443 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1445 /* Receive side control memory map
1446 -------------------------------
1448 Buffer descr 0x0000 (736 - 23K)
1449 VP Table 0x5c00 (256 - 512)
1450 Except q 0x5e00 (128 - 512)
1451 Free buffer q 0x6000 (1K - 2K)
1452 Packet comp q 0x6800 (1K - 2K)
1453 Reass Table 0x7000 (1K - 2K)
1454 VC Table 0x7800 (1K - 2K)
1455 ABR VC Table 0x8000 (1K - 32K)
1458 /* Base address for Buffer Descriptor Table */
1459 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1460 /* Set the buffer size register */
1461 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1463 /* Initialize each entry in the Buffer Descriptor Table */
1464 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1465 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
/* Descriptor 0 is cleared but never handed out (descriptors are 1-based,
   see the 1..num_rx_desc loops below). */
1466 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1468 rx_pkt_start = iadev->rx_pkt_ram;
/* Carve packet RAM into num_rx_desc fixed-size buffers and record each
   buffer's 32-bit start address split into hi/lo 16-bit halves. */
1469 for(i=1; i<=iadev->num_rx_desc; i++)
1471 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1472 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1473 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1475 rx_pkt_start += iadev->rx_buf_sz;
1477 IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
/* Free-buffer queue: start/end/read/write pointers; write pointer starts
   at "end" because the queue is pre-filled with every descriptor. */
1478 i = FREE_BUF_DESC_Q*iadev->memSize;
1479 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1480 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1481 writew(i+iadev->num_rx_desc*sizeof(u_short),
1482 iadev->reass_reg+FREEQ_ED_ADR);
1483 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1484 writew(i+iadev->num_rx_desc*sizeof(u_short),
1485 iadev->reass_reg+FREEQ_WR_PTR);
1486 /* Fill the FREEQ with all the free descriptors. */
1487 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1488 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1489 for(i=1; i<=iadev->num_rx_desc; i++)
1491 *freeq_start = (u_short)i;
1494 IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
1495 /* Packet Complete Queue */
1496 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1497 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1498 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1499 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1500 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1502 /* Exception Queue */
1503 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1504 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1505 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1506 iadev->reass_reg+EXCP_Q_ED_ADR);
1507 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1508 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1510 /* Load local copy of FREEQ and PCQ ptrs */
1511 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1512 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1513 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1514 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1515 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1516 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1517 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1518 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1520 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1521 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1522 iadev->rfL.pcq_wr);)
1523 /* just for check - no VP TBL */
1525 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1526 /* initialize VP Table for invalid VPIs
1527 - I guess we can write all 1s or 0x000f in the entire memory
1528 space or something similar.
1531 /* This seems to work and looks right to me too !!! */
1532 i = REASS_TABLE * iadev->memSize;
1533 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1534 /* initialize Reassembly table to I don't know what ???? */
/* Every reassembly-table entry starts as NO_AAL5_PKT (no PDU pending). */
1535 reass_table = (u16 *)(iadev->reass_ram+i);
1536 j = REASS_TABLE_SZ * iadev->memSize;
1537 for(i=0; i < j; i++)
1538 *reass_table++ = NO_AAL5_PKT;
/* Derive vcsize_sel from num_vc (loop elided in this view). */
1541 while (i != iadev->num_vc) {
1545 i = RX_VC_TABLE * iadev->memSize;
1546 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1547 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1548 j = RX_VC_TABLE_SZ * iadev->memSize;
1549 for(i = 0; i < j; i++)
1551 /* shift the reassembly pointer by 3 + lower 3 bits of
1552 vc_lkup_base register (=3 for 1K VCs) and the last byte
1553 is those low 3 bits.
1554 Shall program this later.
1556 *vc_table = (i << 6) | 15; /* for invalid VCI */
1560 i = ABR_VC_TABLE * iadev->memSize;
1561 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1563 i = ABR_VC_TABLE * iadev->memSize;
1564 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
/* NOTE(review): sizing uses REASS_TABLE_SZ rather than an ABR-specific
   size constant - verify this matches the ABR VC table's real extent. */
1565 j = REASS_TABLE_SZ * iadev->memSize;
1566 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1567 for(i = 0; i < j; i++) {
1568 abr_vc_table->rdf = 0x0003;
1569 abr_vc_table->air = 0x5eb1;
1573 /* Initialize other registers */
1575 /* VP Filter Register set for VC Reassembly only */
1576 writew(0xff00, iadev->reass_reg+VP_FILTER);
1577 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1578 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1580 /* Packet Timeout Count related Registers :
1581 Set packet timeout to occur in about 3 seconds
1582 Set Packet Aging Interval count register to overflow in about 4 us
1584 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
/* Pack bits of ptr16 into the TMOUT_RANGE register's expected layout. */
1586 i = ((u32)ptr16 >> 6) & 0xff;
1588 i |=(((u32)ptr16 << 2) & 0xff00);
1589 writew(i, iadev->reass_reg+TMOUT_RANGE);
1590 /* initiate the desc_tble */
1591 for(i=0; i<iadev->num_tx_desc;i++)
1592 iadev->desc_tbl[i].timestamp = 0;
1594 /* to clear the interrupt status register - read it */
1595 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1597 /* Mask Register - clear it */
1598 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1600 skb_queue_head_init(&iadev->rx_dma_q);
1601 iadev->rx_free_desc_qhead = NULL;
/* One vcc pointer per VC; 4 == sizeof(pointer) on the 32-bit targets
   this driver's casts assume. */
1603 iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1604 if (!iadev->rx_open) {
1605 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1611 iadev->rx_pkt_cnt = 0;
/* All control memory programmed - bring the reassembly engine online. */
1613 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
/* Error unwind: release the coherent DLE ring allocated above. */
1617 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1625 The memory map suggested in appendix A and the coding for it.
1626 Keeping it around just in case we change our mind later.
1628 Buffer descr 0x0000 (128 - 4K)
1629 UBR sched 0x1000 (1K - 4K)
1630 UBR Wait q 0x2000 (1K - 4K)
1631 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1633 extended VC 0x4000 (1K - 8K)
1634 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1635 CBR sched 0x7000 (as needed)
1636 VC table 0x8000 (1K - 32K)
/*
 * tx_intr() - segmentation (transmit) side interrupt service routine.
 *
 * Reads SEG_INTR_STATUS_REG and handles:
 *   TRANSMIT_DONE - runs the completion path under tx_lock, acks the
 *                   bit by writing it back to the status register, and
 *                   wakes anyone blocked in close if close_pending.
 *   TCQ_NOT_EMPTY - transmit-complete queue has entries; logged here
 *                   (further handling elided from this view).
 */
1639 static void tx_intr(struct atm_dev *dev)
1642 unsigned short status;
1643 unsigned long flags;
1645 iadev = INPH_IA_DEV(dev);
1647 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1648 if (status & TRANSMIT_DONE){
1650 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
/* Completion bookkeeping happens under the tx lock (irqsave: this can
   run in interrupt context). */
1651 spin_lock_irqsave(&iadev->tx_lock, flags);
1653 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/* Write-one-to-clear the TRANSMIT_DONE status bit. */
1654 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1655 if (iadev->close_pending)
1656 wake_up(&iadev->close_wait);
1658 if (status & TCQ_NOT_EMPTY)
1660 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
/*
 * tx_dle_intr() - transmit DMA-list-entry (DLE) completion handler.
 *
 * Under tx_lock, walks the tx DLE ring from the software read pointer
 * to the hardware's position (from IPHASE5575_TX_LIST_ADDR).  For each
 * completed entry: dequeue the matching skb from tx_dma_q, unmap its
 * DMA buffer (only on the first of the two DLEs a packet uses - skb
 * data + CPCS trailer), then either complete the skb immediately
 * (vcc->pop or free) for fast VCs, or park it on the per-VC txing_skb
 * list (flagged IA_DLED) when the VC is rate-limited below rate_limit.
 */
1664 static void tx_dle_intr(struct atm_dev *dev)
1667 struct dle *dle, *cur_dle;
1668 struct sk_buff *skb;
1669 struct atm_vcc *vcc;
1670 struct ia_vcc *iavcc;
1672 unsigned long flags;
1674 iadev = INPH_IA_DEV(dev);
1675 spin_lock_irqsave(&iadev->tx_lock, flags);
1676 dle = iadev->tx_dle_q.read;
/* Hardware list pointer -> byte offset within the ring -> DLE index. */
1677 dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1678 (sizeof(struct dle)*DLE_ENTRIES - 1);
1679 cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1680 while (dle != cur_dle)
1682 /* free the DMAed skb */
1683 skb = skb_dequeue(&iadev->tx_dma_q);
1686 /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
/* Only every second DLE carries the skb's mapped data; the other one
   is the CPCS trailer, which is not unmapped here. */
1687 if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1688 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1691 vcc = ATM_SKB(skb)->vcc;
/* Defensive NULL checks: drop the lock before freeing and bail out. */
1693 printk("tx_dle_intr: vcc is null\n");
1694 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1695 dev_kfree_skb_any(skb);
1699 iavcc = INPH_IA_VCC(vcc);
1701 printk("tx_dle_intr: iavcc is null\n");
1702 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1703 dev_kfree_skb_any(skb);
/* Fast VC (at or above the driver's rate limit): complete right away,
   preferring the vcc's own pop() callback when present. */
1706 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1707 if ((vcc->pop) && (skb->len != 0))
1712 dev_kfree_skb_any(skb);
1715 else { /* Hold the rate-limited skb for flow control */
1716 IA_SKB_STATE(skb) |= IA_DLED;
1717 skb_queue_tail(&iavcc->txing_skb, skb);
1719 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
/* Advance with wrap-around, then publish the new read pointer. */
1720 if (++dle == iadev->tx_dle_q.end)
1721 dle = iadev->tx_dle_q.start;
1723 iadev->tx_dle_q.read = dle;
1724 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/*
 * open_tx() - set up the transmit side of a VC.
 *
 * No-op for ATM_NONE tx traffic class.  Rejects ABR and CBR on
 * 25 Mbit PHYs.  Validates max_sdu against the configured tx buffer
 * size, clamps/derives the peak cell rate (pcr) from the QoS request
 * and the line rate, sizes the socket send buffer for rate-limited
 * VCs, programs the main/extended VC table entries (ATM header split
 * across two fields), and then performs per-traffic-class setup:
 * UBR (CRC_APPEND + float-encoded ACR), ABR (srv_cls_param_t built
 * from the QoS txtp fields, then ia_open_abr_vc), or CBR
 * (ia_cbr_setup).  Marks the VC active in testTable on success.
 */
1727 static int open_tx(struct atm_vcc *vcc)
1729 struct ia_vcc *ia_vcc;
1734 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1735 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1736 iadev = INPH_IA_DEV(vcc->dev);
/* The 25 Mbit front end supports neither ABR nor CBR. */
1738 if (iadev->phy_type & FE_25MBIT_PHY) {
1739 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1740 printk("IA: ABR not support\n");
1743 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1744 printk("IA: CBR not support\n");
1748 ia_vcc = INPH_IA_VCC(vcc);
1749 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
/* The CPCS trailer is appended inside the tx buffer, so the usable
   SDU is tx_buf_sz minus the trailer. */
1750 if (vcc->qos.txtp.max_sdu >
1751 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1752 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1753 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1754 vcc->dev_data = NULL;
1758 ia_vcc->vc_desc_cnt = 0;
/* Resolve the effective pcr: unlimited or unspecified -> line rate;
   otherwise honor max_pcr, but never exceed the line rate. */
1762 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1763 vcc->qos.txtp.pcr = iadev->LineRate;
1764 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1765 vcc->qos.txtp.pcr = iadev->LineRate;
1766 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1767 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1768 if (vcc->qos.txtp.pcr > iadev->LineRate)
1769 vcc->qos.txtp.pcr = iadev->LineRate;
1770 ia_vcc->pcr = vcc->qos.txtp.pcr;
/* Completion-timeout heuristic: faster VCs time out sooner. */
1772 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1773 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1774 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1775 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
/* Rate-limited VCs get a holding queue (see tx_dle_intr) and a send
   buffer scaled to their rate so they cannot flood the driver. */
1776 if (ia_vcc->pcr < iadev->rate_limit)
1777 skb_queue_head_init (&ia_vcc->txing_skb);
1778 if (ia_vcc->pcr < iadev->rate_limit) {
1779 struct sock *sk = sk_atm(vcc);
1781 if (vcc->qos.txtp.max_sdu != 0) {
1782 if (ia_vcc->pcr > 60000)
1783 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1784 else if (ia_vcc->pcr > 2000)
1785 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1787 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1790 sk->sk_sndbuf = 24576;
1793 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1794 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1797 memset((caddr_t)vc, 0, sizeof(*vc));
1798 memset((caddr_t)evc, 0, sizeof(*evc));
1800 /* store the most significant 4 bits of vci as the last 4 bits
1801 of first part of atm header.
1802 store the last 12 bits of vci as first 12 bits of the second
1803 part of the atm header.
1805 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1806 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1808 /* check the following for different traffic classes */
1809 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1812 vc->status = CRC_APPEND;
/* ACR is stored in the hardware's floating-point cell-rate format. */
1813 vc->acr = cellrate_to_float(iadev->LineRate);
1814 if (vcc->qos.txtp.pcr > 0)
1815 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1816 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1817 vcc->qos.txtp.max_pcr,vc->acr);)
1819 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1820 { srv_cls_param_t srv_p;
1821 IF_ABR(printk("Tx ABR VCC\n");)
/* Start from defaults, then override with every ABR parameter the
   caller supplied in the QoS txtp. */
1822 init_abr_vc(iadev, &srv_p);
1823 if (vcc->qos.txtp.pcr > 0)
1824 srv_p.pcr = vcc->qos.txtp.pcr;
/* Admission check: the sum of committed rates (MCR + CBR) must not
   exceed the line rate before accepting this min_pcr. */
1825 if (vcc->qos.txtp.min_pcr > 0) {
1826 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1827 if (tmpsum > iadev->LineRate)
1829 srv_p.mcr = vcc->qos.txtp.min_pcr;
1830 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1833 if (vcc->qos.txtp.icr)
1834 srv_p.icr = vcc->qos.txtp.icr;
1835 if (vcc->qos.txtp.tbe)
1836 srv_p.tbe = vcc->qos.txtp.tbe;
1837 if (vcc->qos.txtp.frtt)
1838 srv_p.frtt = vcc->qos.txtp.frtt;
1839 if (vcc->qos.txtp.rif)
1840 srv_p.rif = vcc->qos.txtp.rif;
1841 if (vcc->qos.txtp.rdf)
1842 srv_p.rdf = vcc->qos.txtp.rdf;
1843 if (vcc->qos.txtp.nrm_pres)
1844 srv_p.nrm = vcc->qos.txtp.nrm;
1845 if (vcc->qos.txtp.trm_pres)
1846 srv_p.trm = vcc->qos.txtp.trm;
1847 if (vcc->qos.txtp.adtf_pres)
1848 srv_p.adtf = vcc->qos.txtp.adtf;
1849 if (vcc->qos.txtp.cdf_pres)
1850 srv_p.cdf = vcc->qos.txtp.cdf;
/* ICR (initial cell rate) can never exceed PCR. */
1851 if (srv_p.icr > srv_p.pcr)
1852 srv_p.icr = srv_p.pcr;
1853 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1854 srv_p.pcr, srv_p.mcr);)
/* Final 1 argument: open for transmit (0 = receive in open_rx()). */
1855 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1856 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1857 if (iadev->phy_type & FE_25MBIT_PHY) {
1858 printk("IA: CBR not support\n");
1861 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1862 IF_CBR(printk("PCR is not availble\n");)
1866 vc->status = CRC_APPEND;
1867 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1872 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1874 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1875 IF_EVENT(printk("ia open_tx returning \n");)
/*
 * tx_init() - one-time initialization of the transmit (segmentation) side.
 *
 * Allocates the tx DLE ring in coherent DMA memory, resets the
 * segmentation engine, then lays out and programs the transmit-side
 * control memory: buffer descriptor table, per-descriptor CPCS
 * trailers (kmalloc'd and DMA-mapped), transmit-complete queue (TCQ),
 * packet-ready queue (PRQ), CBR schedule table, UBR/ABR schedule
 * tables and wait queues, main/extended VC tables and the per-VC
 * testTable.  Finishes by programming rate/idle-header/arbitration
 * registers, initializing the tx queues and waitqueues, unmasking
 * TRANSMIT_DONE, and setting rate_limit to LineRate/3.
 * Returns 0 on success; on failure unwinds via the goto labels at the
 * bottom (test tables -> desc_tbl -> trailer buffers -> DLE ring).
 */
1880 static int tx_init(struct atm_dev *dev)
1883 struct tx_buf_desc *buf_desc_ptr;
1884 unsigned int tx_pkt_start;
1896 iadev = INPH_IA_DEV(dev);
1897 spin_lock_init(&iadev->tx_lock);
1899 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1900 readw(iadev->seg_reg+SEG_MASK_REG));)
1902 /* Allocate 4k (boundary aligned) bytes */
1903 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1904 &iadev->tx_dle_dma);
1906 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Software ring pointers all start at the base; "end" is one past the
   last usable entry (mirrors the rx ring in rx_init()). */
1909 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1910 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1911 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1912 iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1914 /* write the upper 20 bits of the start address to tx list address register */
1915 writel(iadev->tx_dle_dma & 0xfffff000,
1916 iadev->dma + IPHASE5575_TX_LIST_ADDR);
/* Mask all segmentation interrupts and reset the engine before
   programming the control memory below. */
1917 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1918 writew(0, iadev->seg_reg+MODE_REG_0);
1919 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1920 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1921 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1922 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1925 Transmit side control memory map
1926 --------------------------------
1927 Buffer descr 0x0000 (128 - 4K)
1928 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1931 CBR Table 0x1800 (as needed) - 6K
1932 UBR Table 0x3000 (1K - 4K) - 12K
1933 UBR Wait queue 0x4000 (1K - 4K) - 16K
1934 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1935 ABR Tbl - 20K, ABR Wq - 22K
1936 extended VC 0x6000 (1K - 8K) - 24K
1937 VC Table 0x8000 (1K - 32K) - 32K
1939 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1940 and Wait q, which can be allotted later.
1943 /* Buffer Descriptor Table Base address */
1944 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1946 /* initialize each entry in the buffer descriptor table */
1947 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
/* Descriptor 0 is cleared but never handed out (1-based descriptors). */
1948 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1950 tx_pkt_start = TX_PACKET_RAM;
/* Carve tx packet RAM into num_tx_desc fixed-size AAL5 buffers. */
1951 for(i=1; i<=iadev->num_tx_desc; i++)
1953 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1954 buf_desc_ptr->desc_mode = AAL5;
1955 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1956 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1958 tx_pkt_start += iadev->tx_buf_sz;
/* One cpcs_trailer_desc per tx descriptor: each holds a kmalloc'd
   CPCS trailer plus its streaming DMA mapping. */
1960 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1961 if (!iadev->tx_buf) {
1962 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1965 for (i= 0; i< iadev->num_tx_desc; i++)
1967 struct cpcs_trailer *cpcs;
1969 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1971 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1972 goto err_free_tx_bufs;
1974 iadev->tx_buf[i].cpcs = cpcs;
1975 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1976 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1978 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1979 sizeof(struct desc_tbl_t), GFP_KERNEL);
1980 if (!iadev->desc_tbl) {
1981 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1982 goto err_free_all_tx_bufs;
1985 /* Communication Queues base address */
1986 i = TX_COMP_Q * iadev->memSize;
1987 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
1989 /* Transmit Complete Queue */
/* TCQ occupies 2*num_tx_desc u_shorts; the write pointer starts at the
   halfway mark because the queue is pre-filled with every descriptor. */
1990 writew(i, iadev->seg_reg+TCQ_ST_ADR);
1991 writew(i, iadev->seg_reg+TCQ_RD_PTR);
1992 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
1993 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1994 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
1995 iadev->seg_reg+TCQ_ED_ADR);
1996 /* Fill the TCQ with all the free descriptors. */
1997 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
1998 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
1999 for(i=1; i<=iadev->num_tx_desc; i++)
2001 *tcq_start = (u_short)i;
2005 /* Packet Ready Queue */
2006 i = PKT_RDY_Q * iadev->memSize;
2007 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2008 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2009 iadev->seg_reg+PRQ_ED_ADR);
2010 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2011 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2013 /* Load local copy of PRQ and TCQ ptrs */
2014 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2015 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2016 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2018 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2019 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2020 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2022 /* Just for safety initializing the queue to have desc 1 always */
2023 /* Fill the PRQ with all the free descriptors. */
2024 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2025 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2026 for(i=1; i<=iadev->num_tx_desc; i++)
2028 *prq_start = (u_short)0; /* desc 1 in all entries */
2032 IF_INIT(printk("Start CBR Init\n");)
2033 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2034 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2035 #else /* Charlie's logic is wrong ? */
2036 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2037 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2038 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2041 IF_INIT(printk("value in register = 0x%x\n",
2042 readw(iadev->seg_reg+CBR_PTR_BASE));)
/* CBR table begin/end/current-pointer registers, in 16-bit words. */
2043 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2044 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2045 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2046 readw(iadev->seg_reg+CBR_TAB_BEG));)
2047 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2048 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2049 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2050 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2051 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2052 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2053 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2054 readw(iadev->seg_reg+CBR_TAB_END+1));)
2056 /* Initialize the CBR Scheduling Table */
2057 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2058 0, iadev->num_vc*6);
/* 6 bytes per VC = 3 u16 schedule entries per VC. */
2059 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2060 iadev->CbrEntryPt = 0;
2061 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2062 iadev->NumEnabledCBR = 0;
2064 /* UBR scheduling Table and wait queue */
2065 /* initialize all bytes of UBR scheduler table and wait queue to 0
2066 - SCHEDSZ is 1K (# of entries).
2067 - UBR Table size is 4K
2068 - UBR wait queue is 4K
2069 since the table and wait queues are contiguous, all the bytes
2070 can be initialized by one memset.
/* Derive vcsize_sel from num_vc (loop elided in this view). */
2075 while (i != iadev->num_vc) {
2080 i = MAIN_VC_TABLE * iadev->memSize;
2081 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2082 i = EXT_VC_TABLE * iadev->memSize;
2083 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2084 i = UBR_SCHED_TABLE * iadev->memSize;
2085 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2086 i = UBR_WAIT_Q * iadev->memSize;
2087 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2088 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2089 0, iadev->num_vc*8);
2090 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2091 /* initialize all bytes of ABR scheduler table and wait queue to 0
2092 - SCHEDSZ is 1K (# of entries).
2093 - ABR Table size is 2K
2094 - ABR wait queue is 2K
2095 since the table and wait queues are contiguous, all the bytes
2096 can be initialized by one memset.
2098 i = ABR_SCHED_TABLE * iadev->memSize;
2099 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2100 i = ABR_WAIT_Q * iadev->memSize;
2101 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2103 i = ABR_SCHED_TABLE*iadev->memSize;
2104 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2105 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2106 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
/* Per-VC host-side bookkeeping (lastTime/fract/vc_status), used by the
   rate-control logic; one entry per VC, each kmalloc'd separately. */
2107 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2108 if (!iadev->testTable) {
2109 printk("Get freepage failed\n");
2110 goto err_free_desc_tbl;
2112 for(i=0; i<iadev->num_vc; i++)
2114 memset((caddr_t)vc, 0, sizeof(*vc));
2115 memset((caddr_t)evc, 0, sizeof(*evc));
2116 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2118 if (!iadev->testTable[i])
2119 goto err_free_test_tables;
2120 iadev->testTable[i]->lastTime = 0;
2121 iadev->testTable[i]->fract = 0;
2122 iadev->testTable[i]->vc_status = VC_UBR;
2127 /* Other Initialization */
2129 /* Max Rate Register */
2130 if (iadev->phy_type & FE_25MBIT_PHY) {
2131 writew(RATE25, iadev->seg_reg+MAXRATE);
2132 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
/* Non-25Mbit PHYs: full line rate and ABR enabled as well. */
2135 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2136 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2138 /* Set Idle Header Registers to be sure */
2139 writew(0, iadev->seg_reg+IDLEHEADHI);
2140 writew(0, iadev->seg_reg+IDLEHEADLO);
2142 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2143 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2145 iadev->close_pending = 0;
2146 init_waitqueue_head(&iadev->close_wait);
2147 init_waitqueue_head(&iadev->timeout_wait);
2148 skb_queue_head_init(&iadev->tx_dma_q);
2149 ia_init_rtn_q(&iadev->tx_return_q);
2151 /* RM Cell Protocol ID and Message Type */
2152 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2153 skb_queue_head_init (&iadev->tx_backlog);
2155 /* Mode Register 1 */
2156 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2158 /* Mode Register 0 */
2159 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2161 /* Interrupt Status Register - read to clear */
2162 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2164 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2165 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2166 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2167 iadev->tx_pkt_cnt = 0;
/* VCs slower than a third of the line rate are treated as
   rate-limited (see open_tx()/tx_dle_intr()). */
2168 iadev->rate_limit = iadev->LineRate / 3;
/* Error unwind: free in reverse order of allocation. */
2172 err_free_test_tables:
2174 kfree(iadev->testTable[i]);
2175 kfree(iadev->testTable);
2177 kfree(iadev->desc_tbl);
2178 err_free_all_tx_bufs:
2179 i = iadev->num_tx_desc;
2182 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2184 pci_unmap_single(iadev->pci, desc->dma_addr,
2185 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2188 kfree(iadev->tx_buf);
2190 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/*
 * ia_int() - top-level PCI interrupt handler for the adapter.
 *
 * Loops while IPHASE5575_BUS_STATUS_REG reports any of the low 7
 * status bits set, dispatching each cause: reassembly interrupt
 * (STAT_REASSINT), rx DLE completion (STAT_DLERINT), segmentation
 * interrupt (STAT_SEGINT), tx DLE completion (STAT_DLETINT), and
 * front-end/error/mark conditions (STAT_FEINT -> IaFrontEndIntr).
 * DLE bits are acknowledged write-one-to-clear.  Returns
 * IRQ_HANDLED/IRQ_NONE via IRQ_RETVAL(handled).
 */
2196 static irqreturn_t ia_int(int irq, void *dev_id)
2198 struct atm_dev *dev;
2200 unsigned int status;
2204 iadev = INPH_IA_DEV(dev);
/* Keep servicing until no interrupt cause remains asserted. */
2205 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2208 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2209 if (status & STAT_REASSINT)
2212 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2215 if (status & STAT_DLERINT)
2217 /* Clear this bit by writing a 1 to it. */
/* NOTE(review): raw pointer store rather than writel() - relies on the
   region being mapped uncached; keep consistent if ever changed. */
2218 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2221 if (status & STAT_SEGINT)
2224 IF_EVENT(printk("IA: tx_intr \n");)
2227 if (status & STAT_DLETINT)
2229 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2232 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2234 if (status & STAT_FEINT)
2235 IaFrontEndIntr(iadev);
2238 return IRQ_RETVAL(handled);
2243 /*----------------------------- entries --------------------------------*/
/*
 * get_esi() - read the adapter's end-system identifier (ESI/MAC).
 *
 * Reads the two MAC registers (IPHASE5575_MAC1/MAC2), converts them
 * to big-endian byte order, and unpacks the bytes most-significant
 * first into dev->esi[0..MAC1_LEN+MAC2_LEN-1].
 */
2244 static int get_esi(struct atm_dev *dev)
2251 iadev = INPH_IA_DEV(dev);
/* readl() gives CPU order from a little-endian register; the
   le32_to_cpu/cpu_to_be32 pair re-expresses it big-endian so the
   byte extraction below walks MSB-first. */
2252 mac1 = cpu_to_be32(le32_to_cpu(readl(
2253 iadev->reg+IPHASE5575_MAC1)));
2254 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2255 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
/* Shift out one byte per ESI slot, most significant byte first. */
2256 for (i=0; i<MAC1_LEN; i++)
2257 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2259 for (i=0; i<MAC2_LEN; i++)
2260 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
/*
 * reset_sar - soft-reset the SAR chip.
 *
 * Saves the first 64 dwords of PCI config space, pulses the external
 * reset register, then restores the saved config space.  Returns a
 * PCIBIOS error code if any config read/write fails.
 */
2264 static int reset_sar(struct atm_dev *dev)
2268    unsigned int pci[64];
2270    iadev = INPH_IA_DEV(dev);
        /* Save config space: the reset below clobbers it. */
2272    if ((error = pci_read_config_dword(iadev->pci,
2273                                i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2275    writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2277    if ((error = pci_write_config_dword(iadev->pci,
2278                                        i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
/*
 * ia_init - one-time per-board initialization at probe time.
 *
 * Reads PCI config (BAR0 base, IRQ, command word), sizes the board from
 * the BAR0 length (1 MB map -> 4K VCs, 0x40000 -> 1K VCs), ioremaps the
 * register/RAM window, fills in the iadev register/RAM pointers, reads
 * the ESI and resets the SAR.
 */
2285 static int __devinit ia_init(struct atm_dev *dev)
2288    unsigned long real_base;
2290    unsigned short command;
2293    /* The device has been identified and registered. Now we read
2294       necessary configuration info like memory base address,
2295       interrupt number etc */
2297    IF_INIT(printk(">ia_init\n");)
2298    dev->ci_range.vpi_bits = 0;
2299    dev->ci_range.vci_bits = NR_VCI_LD;
2301    iadev = INPH_IA_DEV(dev);
2302    real_base = pci_resource_start (iadev->pci, 0);
2303    iadev->irq = iadev->pci->irq;
2305    error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2307            printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2311    IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2312                dev->number, iadev->pci->revision, real_base, iadev->irq);)
2314    /* find mapping size of board */
2316    iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
        /* 1 MB window => 4K-VC board variant. */
2318    if (iadev->pci_map_size == 0x100000){
2319            iadev->num_vc = 4096;
2320            dev->ci_range.vci_bits = NR_VCI_4K_LD;
2323    else if (iadev->pci_map_size == 0x40000) {
2324            iadev->num_vc = 1024;
2328            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2331    IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2333    /* enable bus mastering */
2334    pci_set_master(iadev->pci);
2337     * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2341    /* mapping the physical address to a virtual address in address space */
2342    base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */
2346            printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2350    IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2351                dev->number, iadev->pci->revision, base, iadev->irq);)
2353    /* filling the iphase dev structure */
2354    iadev->mem = iadev->pci_map_size /2;
2355    iadev->real_base = real_base;
2358    /* Bus Interface Control Registers */
2359    iadev->reg = base + REG_BASE;
2360    /* Segmentation Control Registers */
2361    iadev->seg_reg = base + SEG_BASE;
2362    /* Reassembly Control Registers */
2363    iadev->reass_reg = base + REASS_BASE;
2364    /* Front end/ DMA control registers */
2365    iadev->phy = base + PHY_BASE;
2366    iadev->dma = base + PHY_BASE;
2367    /* RAM - Segmentation RAm and Reassembly RAM */
2368    iadev->ram = base + ACTUAL_RAM_BASE;
2369    iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
2370    iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
2372    /* lets print out the above */
2373    IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
2374             iadev->reg,iadev->seg_reg,iadev->reass_reg,
2375             iadev->phy, iadev->ram, iadev->seg_ram,
2378    /* lets try reading the MAC address */
2379    error = get_esi(dev);
        /* On ESI/SAR failure: unmap the window before bailing. */
2381            iounmap(iadev->base);
2385    for (i=0; i < ESI_LEN; i++)
2386            printk("%s%02X",i ? "-" : "",dev->esi[i]);
2390    if (reset_sar(dev)) {
2391            iounmap(iadev->base);
2392            printk("IA: reset SAR fail, please try again\n");
/*
 * ia_update_stats - accumulate hardware cell/drop counters into the
 * software totals.  The 16-bit hardware counter pairs (lo/hi) are
 * combined into 32-bit software counts; counters are read-to-clear on
 * the board, so this must run periodically (driven by ia_led_timer).
 * Skipped while no carrier is detected.
 */
2398 static void ia_update_stats(IADEV *iadev) {
2399    if (!iadev->carrier_detect)
2401    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2402    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2403    iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2404    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2405    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2406    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
/*
 * ia_led_timer - periodic (4 Hz) housekeeping timer, shared by all boards.
 *
 * Blinks each board's LED (alternating on/off via blinking[]), folds the
 * hardware statistics counters into software totals on the "off" phase,
 * and on the "on" phase polls tx completions and wakes any closer waiting
 * in ia_close().  Re-arms itself at the end.
 */
2410 static void ia_led_timer(unsigned long arg) {
2411     unsigned long flags;
2412     static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
         /* NOTE(review): ctrl_reg being static looks unnecessary (it is
          * re-read per board each pass) — confirm before changing. */
2414     static u32 ctrl_reg;
2415     for (i = 0; i < iadev_count; i++) {
2417           ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2418           if (blinking[i] == 0) {
2420              ctrl_reg &= (~CTRL_LED);
2421              writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2422              ia_update_stats(ia_dev[i]);
2426              ctrl_reg |= CTRL_LED;
2427              writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2428              spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2429              if (ia_dev[i]->close_pending)
2430                 wake_up(&ia_dev[i]->close_wait);
2431              ia_tx_poll(ia_dev[i]);
2432              spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2436     mod_timer(&ia_timer, jiffies + HZ / 4);
/* ia_phy_put - write an 8-bit value to a PHY register at the given
 * offset in the front-end register window. */
2440 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2443    writel(value, INPH_IA_DEV(dev)->phy+addr);
/* ia_phy_get - read a PHY register; the 32-bit readl() result is
 * truncated to the low 8 bits by the return type. */
2446 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2448    return readl(INPH_IA_DEV(dev)->phy+addr);
/*
 * ia_free_tx - release all transmit-side resources allocated by tx_init():
 * the descriptor table, per-VC test tables, per-descriptor CPCS trailer
 * buffers (unmapping each DMA mapping first), and the tx DLE queue.
 */
2451 static void ia_free_tx(IADEV *iadev)
2455    kfree(iadev->desc_tbl);
2456    for (i = 0; i < iadev->num_vc; i++)
2457            kfree(iadev->testTable[i]);
2458    kfree(iadev->testTable);
2459    for (i = 0; i < iadev->num_tx_desc; i++) {
2460            struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2462            pci_unmap_single(iadev->pci, desc->dma_addr,
2463                    sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2466    kfree(iadev->tx_buf);
2467    pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/* ia_free_rx - release receive-side resources from rx_init(): the
 * open-VC table and the rx DLE queue's coherent DMA buffer. */
2471 static void ia_free_rx(IADEV *iadev)
2473    kfree(iadev->rx_open);
2474    pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2478 static int __devinit ia_start(struct atm_dev *dev)
2484 IF_EVENT(printk(">ia_start\n");)
2485 iadev = INPH_IA_DEV(dev);
2486 if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2487 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2488 dev->number, iadev->irq);
2492 /* @@@ should release IRQ on error */
2493 /* enabling memory + master */
2494 if ((error = pci_write_config_word(iadev->pci,
2496 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2498 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2499 "master (0x%x)\n",dev->number, error);
2505 /* Maybe we should reset the front end, initialize Bus Interface Control
2506 Registers and see. */
2508 IF_INIT(printk("Bus ctrl reg: %08x\n",
2509 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2510 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2511 ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2519 | CTRL_DLETMASK /* shud be removed l8r */
2526 writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2528 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2529 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2530 printk("Bus status reg after init: %08x\n",
2531 readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2534 error = tx_init(dev);
2537 error = rx_init(dev);
2541 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2542 writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2543 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2544 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2545 phy = 0; /* resolve compiler complaint */
2547 if ((phy=ia_phy_get(dev,0)) == 0x30)
2548 printk("IA: pm5346,rev.%d\n",phy&0x0f);
2550 printk("IA: utopia,rev.%0x\n",phy);)
2552 if (iadev->phy_type & FE_25MBIT_PHY)
2553 ia_mb25_init(iadev);
2554 else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2555 ia_suni_pm7345_init(iadev);
2557 error = suni_init(dev);
2560 if (dev->phy->start) {
2561 error = dev->phy->start(dev);
2565 /* Get iadev->carrier_detect status */
2566 IaFrontEndIntr(iadev);
2575 free_irq(iadev->irq, dev);
/*
 * ia_close - tear down a VC.
 *
 * Tx side: marks the VC not-ready, purges this VC's packets from the
 * shared tx backlog (preserving other VCs' packets via a temp queue),
 * waits (with a PCR-scaled timeout) for all outstanding tx descriptors
 * to drain, then resets the per-VC rate tables and returns CBR/ABR
 * bandwidth to the pool.  Rx side: invalidates the VC's reassembly and
 * rx-VC table entries, restores ABR defaults, drains pending rx DLEs and
 * clears the rx_open slot.  Finally frees the ia_vcc.
 */
2580 static void ia_close(struct atm_vcc *vcc)
2585        struct ia_vcc *ia_vcc;
2586        struct sk_buff *skb = NULL;
2587        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2588        unsigned long closetime, flags;
2590        iadev = INPH_IA_DEV(vcc->dev);
2591        ia_vcc = INPH_IA_VCC(vcc);
2592        if (!ia_vcc) return;
2594        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
2595                                              ia_vcc->vc_desc_cnt,vcc->vci);)
2596        clear_bit(ATM_VF_READY,&vcc->flags);
2597        skb_queue_head_init (&tmp_tx_backlog);
2598        skb_queue_head_init (&tmp_vcc_backlog);
2599        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2600           iadev->close_pending++;
              /* Brief uninterruptible pause before draining the backlog. */
2601           prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2602           schedule_timeout(50);
2603           finish_wait(&iadev->timeout_wait, &wait);
2604           spin_lock_irqsave(&iadev->tx_lock, flags);
              /* Free this VC's queued skbs; park the rest on a temp list. */
2605           while((skb = skb_dequeue(&iadev->tx_backlog))) {
2606              if (ATM_SKB(skb)->vcc == vcc){
2607                 if (vcc->pop) vcc->pop(vcc, skb);
2608                 else dev_kfree_skb_any(skb);
2611                 skb_queue_tail(&tmp_tx_backlog, skb);
              /* Put the surviving skbs back on the shared backlog. */
2613           while((skb = skb_dequeue(&tmp_tx_backlog)))
2614              skb_queue_tail(&iadev->tx_backlog, skb);
2615           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2616           closetime = 300000 / ia_vcc->pcr;
2619           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2620           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2621           spin_lock_irqsave(&iadev->tx_lock, flags);
2622           iadev->close_pending--;
2623           iadev->testTable[vcc->vci]->lastTime = 0;
2624           iadev->testTable[vcc->vci]->fract = 0;
2625           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2626           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2627              if (vcc->qos.txtp.min_pcr > 0)
2628                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2630           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2631              ia_vcc = INPH_IA_VCC(vcc);
2632              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2633              ia_cbrVc_close (vcc);
2635           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2638        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2639           // reset reass table
2640           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2641           vc_table += vcc->vci;
2642           *vc_table = NO_AAL5_PKT;
2644           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2645           vc_table += vcc->vci;
2646           *vc_table = (vcc->vci << 6) | 15;
2647           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2648              struct abr_vc_table __iomem *abr_vc_table =
2649                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2650              abr_vc_table +=  vcc->vci;
2651              abr_vc_table->rdf = 0x0003;
2652              abr_vc_table->air = 0x5eb1;
2654           // Drain the packets
2655           rx_dle_intr(vcc->dev);
2656           iadev->rx_open[vcc->vci] = NULL;
2658        kfree(INPH_IA_VCC(vcc));
2660        vcc->dev_data = NULL;
2661        clear_bit(ATM_VF_ADDR,&vcc->flags);
/*
 * ia_open - open a VC: allocate the per-VC ia_vcc, set up the rx and tx
 * sides (open_rx/open_tx), mark the VC ready and (first time only) arm
 * the LED/stats timer.  Only AAL5 is supported.
 */
2665 static int ia_open(struct atm_vcc *vcc)
2668    struct ia_vcc *ia_vcc;
2670    if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2672            IF_EVENT(printk("ia: not partially allocated resources\n");)
2673            vcc->dev_data = NULL;
2675    iadev = INPH_IA_DEV(vcc->dev);
        /* NOTE(review): vci is compared against ATM_VPI_UNSPEC and vpi
         * against ATM_VCI_UNSPEC — looks swapped; confirm against the
         * constants in <linux/atm.h> before changing. */
2676    if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2678            IF_EVENT(printk("iphase open: unspec part\n");)
2679            set_bit(ATM_VF_ADDR,&vcc->flags);
2681    if (vcc->qos.aal != ATM_AAL5)
2683    IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2684                                 vcc->dev->number, vcc->vpi, vcc->vci);)
2686    /* Device dependent initialization */
2687    ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2688    if (!ia_vcc) return -ENOMEM;
2689    vcc->dev_data = ia_vcc;
2691    if ((error = open_rx(vcc)))
2693            IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2698    if ((error = open_tx(vcc)))
2700            IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2705    set_bit(ATM_VF_READY,&vcc->flags);
        /* Arm the shared LED/stats timer once, on the first ever open. */
2709    static u8 first = 1;
2711         ia_timer.expires = jiffies + 3*HZ;
2712         add_timer(&ia_timer);
2717    IF_EVENT(printk("ia open returning\n");)
/* ia_change_qos - QoS renegotiation entry point; currently a stub that
 * only logs the call. */
2721 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2723    IF_EVENT(printk(">ia_change_qos\n");)
/*
 * ia_ioctl - driver-private ioctl handler (IA_CMD) plus PHY passthrough.
 *
 * Non-IA_CMD requests are forwarded to the PHY's ioctl.  IA_CMD
 * sub-commands dump driver/register state to userspace or tweak debug
 * options; all privileged sub-commands require CAP_NET_ADMIN.
 * The board index in ia_cmds.status selects the adapter.
 */
2727 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2733    IF_EVENT(printk(">ia_ioctl\n");)
2734    if (cmd != IA_CMD) {
2735            if (!dev->phy->ioctl) return -EINVAL;
2736            return dev->phy->ioctl(dev,cmd,arg);
2738    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2739    board = ia_cmds.status;
        /* Valid adapter indices are 0 .. iadev_count-1; the original
         * ">" test let board == iadev_count through (off-by-one). */
2740    if ((board < 0) || (board >= iadev_count))
2742    iadev = ia_dev[board];
2743    switch (ia_cmds.cmd) {
2746       switch (ia_cmds.sub_cmd) {
2748             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2749             if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2753          case MEMDUMP_SEGREG:
2754             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2755             tmps = (u16 __user *)ia_cmds.buf;
2756             for(i=0; i<0x80; i+=2, tmps++)
2757                if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2761          case MEMDUMP_REASSREG:
2762             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2763             tmps = (u16 __user *)ia_cmds.buf;
2764             for(i=0; i<0x80; i+=2, tmps++)
2765                if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2771               ia_regs_t       *regs_local;
2775               if (!capable(CAP_NET_ADMIN)) return -EPERM;
2776               regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2777               if (!regs_local) return -ENOMEM;
                  /* "&regs_local->..." was garbled to "(R)s_local->..." by an
                   * HTML-entity mis-encoding; restored the address-of. */
2778               ffL = &regs_local->ffredn;
2779               rfL = &regs_local->rfredn;
2780               /* Copy real rfred registers into the local copy */
2781               for (i=0; i<(sizeof (rfredn_t))/4; i++)
2782                  ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2783               /* Copy real ffred registers into the local copy */
2784               for (i=0; i<(sizeof (ffredn_t))/4; i++)
2785                  ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2787               if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2792               printk("Board %d registers dumped\n", board);
2798             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2806             printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2807             printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2812             struct k_sonet_stats *stats;
2813             stats = &PRIV(_ia_dev[board])->sonet_stats;
2814             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2815             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2816             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2817             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2818             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2819             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2820             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2821             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2822             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2827             if (!capable(CAP_NET_ADMIN)) return -EPERM;
                 /* Re-stock the rx free queue and unmask rx interrupts. */
2828             for (i = 1; i <= iadev->num_rx_desc; i++)
2829                free_desc(_ia_dev[board], i);
2830             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2831                                            iadev->reass_reg+REASS_MASK_REG);
2838             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2839             IaFrontEndIntr(iadev);
2842             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2845                IADebugFlag = ia_cmds.maddr;
2846             printk("New debug option loaded\n");
/* ia_getsockopt - socket-option hook; stub that only logs the call. */
2862 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2863    void __user *optval, int optlen)
2865    IF_EVENT(printk(">ia_getsockopt\n");)
/* ia_setsockopt - socket-option hook; stub that only logs the call. */
2869 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2870    void __user *optval, int optlen)
2872    IF_EVENT(printk(">ia_setsockopt\n");)
/*
 * ia_pkt_tx - queue one skb for transmission on an open VC.
 *
 * Caller holds iadev->tx_lock (see ia_send).  Validates the skb (VC
 * still txing, fits in a tx buffer, 4-byte aligned data), takes a free
 * descriptor from the TCQ, fills the buffer descriptor in segmentation
 * RAM, posts the descriptor on the packet-ready queue, and builds two
 * tx DLEs: one for the payload and one for the CPCS trailer (the
 * trailer DLE carries the PRQ write pointer and the DMA interrupt).
 * Finally kicks the tx transaction counter.  Returns 0 on success
 * (including the paths that deliberately drop the skb).
 */
2876 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2879        struct tx_buf_desc __iomem *buf_desc_ptr;
2883        struct cpcs_trailer *trailer;
2884        struct ia_vcc *iavcc;
2886        iadev = INPH_IA_DEV(vcc->dev);
2887        iavcc = INPH_IA_VCC(vcc);
2888        if (!iavcc->txing) {
2889           printk("discard packet on closed VC\n");
2893           dev_kfree_skb_any(skb);
            /* Reserve 8 bytes of the tx buffer for the CPCS trailer. */
2897        if (skb->len > iadev->tx_buf_sz - 8) {
2898           printk("Transmit size over tx buffer size\n");
2902           dev_kfree_skb_any(skb);
            /* Hardware DMA requires 4-byte-aligned payload. */
2905        if ((u32)skb->data & 3) {
2906           printk("Misaligned SKB\n");
2910           dev_kfree_skb_any(skb);
2913        /* Get a descriptor number from our free descriptor queue
2914           We get the descr number from the TCQ now, since I am using
2915           the TCQ as a free buffer queue. Initially TCQ will be
2916           initialized with all the descriptors and is hence, full.
2918        desc = get_desc (iadev, iavcc);
2921        comp_code = desc >> 13;
2924        if ((desc == 0) || (desc > iadev->num_tx_desc))
2926           IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2927           atomic_inc(&vcc->stats->tx);
2931           dev_kfree_skb_any(skb);
2932           return 0;   /* return SUCCESS */
2937              IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2941        /* remember the desc and vcc mapping */
2942        iavcc->vc_desc_cnt++;
2943        iadev->desc_tbl[desc-1].iavcc = iavcc;
2944        iadev->desc_tbl[desc-1].txskb = skb;
2945        IA_SKB_STATE(skb) = 0;
            /* Advance the TCQ read pointer (entries are 2 bytes), wrapping. */
2947        iadev->ffL.tcq_rd += 2;
2948        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2949           iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2950        writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2952        /* Put the descriptor number in the packet ready queue
2953            and put the updated write pointer in the DLE field
2955        *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2957        iadev->ffL.prq_wr += 2;
2958        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2959           iadev->ffL.prq_wr = iadev->ffL.prq_st;
2961        /* Figure out the exact length of the packet and padding required to
2962           make it  aligned on a 48 byte boundary.  */
2963        total_len = skb->len + sizeof(struct cpcs_trailer);
2964        total_len = ((total_len + 47) / 48) * 48;
2965        IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2967        /* Put the packet in a tx buffer */
2968        trailer = iadev->tx_buf[desc-1].cpcs;
2969        IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2970                  (u32)skb, (u32)skb->data, skb->len, desc);)
2971        trailer->control = 0;
            /* Byte-swap the 16-bit length for the on-wire trailer. */
2973        trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2974        trailer->crc32 = 0;     /* not needed - dummy bytes */
2976        /* Display the packet */
2977        IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2978                                                        skb->len, tcnter++);
2979        xdump(skb->data, skb->len, "TX: ");
2982        /* Build the buffer descriptor */
2983        buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2984        buf_desc_ptr += desc;   /* points to the corresponding entry */
2985        buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2986        /* Huh ? p.115 of users guide describes this as a read-only register */
2987        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2988        buf_desc_ptr->vc_index = vcc->vci;
2989        buf_desc_ptr->bytes = total_len;
2991        if (vcc->qos.txtp.traffic_class == ATM_ABR)
2992           clear_lockup (vcc, iadev);
2994        /* Build the DLE structure */
2995        wr_ptr = iadev->tx_dle_q.write;
2996        memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
2997        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2998                skb->len, PCI_DMA_TODEVICE);
2999        wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3000                                                  buf_desc_ptr->buf_start_lo;
3001        /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */
3002        wr_ptr->bytes = skb->len;
3004        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3005        if ((wr_ptr->bytes >> 2) == 0xb)
3006           wr_ptr->bytes = 0x30;
3008        wr_ptr->mode = TX_DLE_PSI;
3009        wr_ptr->prq_wr_ptr_data = 0;
3011        /* end is not to be used for the DLE q */
3012        if (++wr_ptr == iadev->tx_dle_q.end)
3013             wr_ptr = iadev->tx_dle_q.start;
3015        /* Build trailer dle */
3016        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3017        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3018          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3020        wr_ptr->bytes = sizeof(struct cpcs_trailer);
3021        wr_ptr->mode = DMA_INT_ENABLE;
3022        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3024        /* end is not to be used for the DLE q */
3025        if (++wr_ptr == iadev->tx_dle_q.end)
3026             wr_ptr = iadev->tx_dle_q.start;
3028        iadev->tx_dle_q.write = wr_ptr;
3029        ATM_DESC(skb) = vcc->vci;
3030        skb_queue_tail(&iadev->tx_dma_q, skb);
3032        atomic_inc(&vcc->stats->tx);
3033        iadev->tx_pkt_cnt++;
3034        /* Increment transaction counter */
3035        writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3038        /* add flow control logic */
3039        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3040          if (iavcc->vc_desc_cnt > 10) {
3041             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3042            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3043              iavcc->flow_inc = -1;
3044              iavcc->saved_tx_quota = vcc->tx_quota;
3045           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3046    //          vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3047             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3048              iavcc->flow_inc = 0;
3052        IF_TX(printk("ia send done\n");)
/*
 * ia_send - atmdev_ops send entry point.
 *
 * Rejects NULL or oversized skbs, then under tx_lock either appends to
 * the device tx backlog (when a backlog already exists, preserving FIFO
 * order) or hands the skb straight to ia_pkt_tx(), falling back to the
 * backlog if that fails.
 */
3056 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3059        struct ia_vcc *iavcc;
3060        unsigned long flags;
3062        iadev = INPH_IA_DEV(vcc->dev);
3063        iavcc = INPH_IA_VCC(vcc);
3064        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3067           printk(KERN_CRIT "null skb in ia_send\n");
3068           else dev_kfree_skb_any(skb);
3071        spin_lock_irqsave(&iadev->tx_lock, flags);
3072        if (!test_bit(ATM_VF_READY,&vcc->flags)){
3073            dev_kfree_skb_any(skb);
3074            spin_unlock_irqrestore(&iadev->tx_lock, flags);
3077        ATM_SKB(skb)->vcc = vcc;
            /* Keep FIFO order: if anything is backlogged, queue behind it. */
3079        if (skb_peek(&iadev->tx_backlog)) {
3080           skb_queue_tail(&iadev->tx_backlog, skb);
3083           if (ia_pkt_tx (vcc, skb)) {
3084              skb_queue_tail(&iadev->tx_backlog, skb);
3087        spin_unlock_irqrestore(&iadev->tx_lock, flags);
/*
 * ia_proc_read - /proc read handler: emits the board type line (PHY
 * variant, VC count, memory size) and a block of packet/cell counters.
 * Returns the number of bytes written to the page for the given *pos.
 */
3092 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3096   IADEV *iadev = INPH_IA_DEV(dev);
3098     if (iadev->phy_type == FE_25MBIT_PHY) {
3099       n = sprintf(page, "  Board Type     :  Iphase5525-1KVC-128K\n");
3102       if (iadev->phy_type == FE_DS3_PHY)
3103          n = sprintf(page, "  Board Type     :  Iphase-ATM-DS3");
3104       else if (iadev->phy_type == FE_E3_PHY)
3105          n = sprintf(page, "  Board Type     :  Iphase-ATM-E3");
3106       else if (iadev->phy_type == FE_UTP_OPTION)
3107          n = sprintf(page, "  Board Type     :  Iphase-ATM-UTP155");
3109          n = sprintf(page, "  Board Type     :  Iphase-ATM-OC3");
         /* 0x40000 BAR window => 1K-VC variant; otherwise 4K-VC. */
3111       if (iadev->pci_map_size == 0x40000)
3112          n += sprintf(tmpPtr, "-1KVC-");
3114          n += sprintf(tmpPtr, "-4KVC-");
3116       if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3117          n += sprintf(tmpPtr, "1M  \n");
3118       else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3119          n += sprintf(tmpPtr, "512K\n");
3121          n += sprintf(tmpPtr, "128K\n");
         /* NOTE(review): "Receiverd" typo below is user-visible /proc
          * output; left as-is since tools may match the exact string. */
3125     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3126                           "  Size of Tx Buffer  :  %u\n"
3127                           "  Number of Rx Buffer:  %u\n"
3128                           "  Size of Rx Buffer  :  %u\n"
3129                           "  Packets Receiverd  :  %u\n"
3130                           "  Packets Transmitted:  %u\n"
3131                           "  Cells Received     :  %u\n"
3132                           "  Cells Transmitted  :  %u\n"
3133                           "  Board Dropped Cells:  %u\n"
3134                           "  Board Dropped Pkts :  %u\n",
3135                   iadev->num_tx_desc,  iadev->tx_buf_sz,
3136                   iadev->num_rx_desc,  iadev->rx_buf_sz,
3137                   iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3138                   iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3139                   iadev->drop_rxcell, iadev->drop_rxpkt);
/* atmdev_ops table registered with atm_dev_register() in ia_init_one();
 * wires the driver entry points into the ATM core. */
3144 static const struct atmdev_ops ops = {
3148        .getsockopt     = ia_getsockopt,
3149        .setsockopt     = ia_setsockopt,
3151        .phy_put        = ia_phy_put,
3152        .phy_get        = ia_phy_get,
3153        .change_qos     = ia_change_qos,
3154        .proc_read      = ia_proc_read,
3155        .owner          = THIS_MODULE,
/*
 * ia_init_one - PCI probe callback.
 *
 * Allocates the per-board IADEV, enables the PCI device, registers the
 * ATM device, records the board in the ia_dev/_ia_dev tables, then runs
 * ia_init() + ia_start() under misc_lock.  On failure unwinds in reverse
 * order via the err_out_* labels.
 */
3158 static int __devinit ia_init_one(struct pci_dev *pdev,
3159                                 const struct pci_device_id *ent)
3161        struct atm_dev *dev;
3163        unsigned long flags;
3166        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3174        IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3175                pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3176        if (pci_enable_device(pdev)) {
3178                goto err_out_free_iadev;
3180        dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3183                goto err_out_disable_dev;
3185        dev->dev_data = iadev;
3186        IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3187        IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3190        pci_set_drvdata(pdev, dev);
3192        ia_dev[iadev_count] = iadev;
3193        _ia_dev[iadev_count] = dev;
3195        spin_lock_init(&iadev->misc_lock);
3196        /* First fixes first. I don't want to think about this now. */
3197        spin_lock_irqsave(&iadev->misc_lock, flags);
3198        if (ia_init(dev) || ia_start(dev)) {
3199                IF_INIT(printk("IA register failed!\n");)
                    /* Roll back the board-table slots claimed above. */
3201                ia_dev[iadev_count] = NULL;
3202                _ia_dev[iadev_count] = NULL;
3203                spin_unlock_irqrestore(&iadev->misc_lock, flags);
3205                goto err_out_deregister_dev;
3207        spin_unlock_irqrestore(&iadev->misc_lock, flags);
3208        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3210        iadev->next_board = ia_boards;
3215 err_out_deregister_dev:
3216        atm_dev_deregister(dev);
3217 err_out_disable_dev:
3218        pci_disable_device(pdev);
/*
 * ia_remove_one - PCI remove callback: mask PHY loss-of-signal
 * interrupts, stop the PHY, free the IRQ, clear the board-table slots,
 * deregister the ATM device, unmap the register window and disable the
 * PCI device.
 */
3225 static void __devexit ia_remove_one(struct pci_dev *pdev)
3227        struct atm_dev *dev = pci_get_drvdata(pdev);
3228        IADEV *iadev = INPH_IA_DEV(dev);
3230        /* Disable phy interrupts */
3231        ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3235        if (dev->phy && dev->phy->stop)
3236                dev->phy->stop(dev);
3238        /* De-register device */
3239        free_irq(iadev->irq, dev);
3241        ia_dev[iadev_count] = NULL;
3242        _ia_dev[iadev_count] = NULL;
3243        IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3244        atm_dev_deregister(dev);
3246        iounmap(iadev->base);
3247        pci_disable_device(pdev);
/* PCI IDs for the two supported Interphase device variants (0x0008,
 * 0x0009); exported for module autoloading via MODULE_DEVICE_TABLE. */
3255 static struct pci_device_id ia_pci_tbl[] = {
3256        { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3257        { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3260 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
/* PCI driver glue: binds ia_pci_tbl IDs to the probe/remove callbacks. */
3262 static struct pci_driver ia_driver = {
3264        .id_table =     ia_pci_tbl,
3265        .probe =        ia_init_one,
3266        .remove =       __devexit_p(ia_remove_one),
/* ia_module_init - module entry: register the PCI driver and arm the
 * LED/stats timer; logs an error when no adapter was found. */
3269 static int __init ia_module_init(void)
3273        ret = pci_register_driver(&ia_driver);
3275              ia_timer.expires = jiffies + 3*HZ;
3276              add_timer(&ia_timer);
3278              printk(KERN_ERR DEV_LABEL ": no adapter found\n");
/* ia_module_exit - module teardown: unregister the PCI driver (which
 * removes each board) and stop the shared timer. */
3282 static void __exit ia_module_exit(void)
3284        pci_unregister_driver(&ia_driver);
3286        del_timer(&ia_timer);
/* Module entry/exit registration. */
3289 module_init(ia_module_init);
3290 module_exit(ia_module_exit);