2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
41 #include <asm/string.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47 #include <asm/atomic.h>
50 #include <asm/idprom.h>
52 #include <asm/openprom.h>
53 #include <asm/oplib.h>
54 #include <asm/pgtable.h>
57 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
58 #define FORE200E_USE_TASKLET
61 #if 0 /* enable the debugging code of the buffer supply queues */
62 #define FORE200E_BSQ_DEBUG
65 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
66 #define FORE200E_52BYTE_AAL0_SDU
72 #define FORE200E_VERSION "0.3e"
74 #define FORE200E "fore200e: "
76 #if 0 /* override .config */
77 #define CONFIG_ATM_FORE200E_DEBUG 1
79 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
80 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
81 printk(FORE200E format, ##args); } while (0)
83 #define DPRINTK(level, format, args...) do {} while (0)
87 #define FORE200E_ALIGN(addr, alignment) \
88 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
92 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
/* Advance a circular queue index modulo the queue size.
   The original form, (index = ++(index) % (modulo)), modified 'index' twice
   without an intervening sequence point, which is undefined behavior in C;
   this form performs one well-defined read-increment-assign. */
#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
97 #define ASSERT(expr) if (!(expr)) { \
98 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
99 __func__, __LINE__, #expr); \
100 panic(FORE200E "%s", __func__); \
103 #define ASSERT(expr) do {} while (0)
107 static const struct atmdev_ops fore200e_ops;
108 static const struct fore200e_bus fore200e_bus[];
110 static LIST_HEAD(fore200e_boards);
113 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
114 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
115 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
118 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
119 { BUFFER_S1_NBR, BUFFER_L1_NBR },
120 { BUFFER_S2_NBR, BUFFER_L2_NBR }
123 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
124 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
125 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
129 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
130 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
134 #if 0 /* currently unused */
136 fore200e_fore2atm_aal(enum fore200e_aal aal)
139 case FORE200E_AAL0: return ATM_AAL0;
140 case FORE200E_AAL34: return ATM_AAL34;
141 case FORE200E_AAL5: return ATM_AAL5;
149 static enum fore200e_aal
150 fore200e_atm2fore_aal(int aal)
153 case ATM_AAL0: return FORE200E_AAL0;
154 case ATM_AAL34: return FORE200E_AAL34;
157 case ATM_AAL5: return FORE200E_AAL5;
165 fore200e_irq_itoa(int irq)
168 sprintf(str, "%d", irq);
173 /* allocate and align a chunk of memory intended to hold the data being exchanged
174 between the driver and the adapter (using streaming DVMA) */
/* NOTE(review): this excerpt is a numbered listing with elided lines; braces,
   return statements and the storage-class keyword are not visible here. */
177 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
179 unsigned long offset = 0;
/* small alignments are presumably already satisfied by kzalloc(); the elided
   branch body likely neutralizes 'alignment' in that case -- TODO confirm */
181 if (alignment <= sizeof(int))
/* over-allocate by 'alignment' bytes so an aligned sub-buffer of 'size'
   bytes always fits inside the allocation */
184 chunk->alloc_size = size + alignment;
185 chunk->align_size = size;
186 chunk->direction = direction;
/* GFP_DMA keeps the buffer in adapter-addressable (DMA) memory */
188 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
189 if (chunk->alloc_addr == NULL)
/* offset from alloc_addr up to the next 'alignment'-aligned address */
193 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
195 chunk->align_addr = chunk->alloc_addr + offset;
/* map the aligned region for streaming DMA in the caller-supplied direction */
197 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
203 /* free a chunk of memory */
206 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
/* undo the streaming DMA mapping established by fore200e_chunk_alloc().
   NOTE(review): the alloc path sets chunk->align_size but the unmap here reads
   chunk->dma_size -- verify against the full struct that both fields track the
   mapped length. */
208 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
210 kfree(chunk->alloc_addr);
/* busy-wait for 'msecs' milliseconds; burns CPU rather than sleeping, so this
   is only suitable for contexts that cannot schedule (e.g. early init/reset) */
215 fore200e_spin(int msecs)
217 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
218 while (time_before(jiffies, timeout));
/* busy-poll a host-memory status word until it reaches 'val', an error bit is
   raised, or 'msecs' elapse; the elided lines presumably return the 'ok' flag */
223 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
225 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
/* stop early on exact match or on any STATUS_ERROR bit in the word */
230 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
233 } while (time_before(jiffies, timeout));
/* reached only on failure (timeout or error status) -- see elided branch */
237 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
/* like fore200e_poll(), but reads a device register through the bus-specific
   accessor instead of plain host memory; no error-bit shortcut here */
247 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
249 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251 if ((ok = (fore200e->bus->read(addr) == val)))
256 } while (time_before(jiffies, timeout));
/* reached only when the register never matched within the timeout */
260 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
261 fore200e->bus->read(addr), val);
/* release every rx data buffer of every buffer-supply queue
   (one queue per (scheme, magnitude) pair) */
270 fore200e_free_rx_buf(struct fore200e* fore200e)
272 int scheme, magn, nbr;
273 struct buffer* buffer;
275 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
276 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
278 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
280 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
282 struct chunk* data = &buffer[ nbr ].data;
/* a NULL alloc_addr marks a buffer that was never (or already) allocated */
284 if (data->alloc_addr != NULL)
285 fore200e_chunk_free(fore200e, data);
/* free the DMA-consistent status words and RBD blocks of every
   buffer-supply queue; each is freed only if it was actually allocated */
294 fore200e_uninit_bs_queue(struct fore200e* fore200e)
298 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
299 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
301 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
302 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
304 if (status->alloc_addr)
305 fore200e->bus->dma_chunk_free(fore200e, status);
307 if (rbd_block->alloc_addr)
308 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
/* hard-reset the adapter and wait for its self-test; 'diag' appears to gate
   the "self-test passed" diagnostic print (elided branch) -- TODO confirm */
315 fore200e_reset(struct fore200e* fore200e, int diag)
319 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
/* tell the on-board monitor we are performing a cold start */
321 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
323 fore200e->bus->reset(fore200e);
/* give the board up to 1s to report a successful self-test */
326 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
329 printk(FORE200E "device %s self-test failed\n", fore200e->name);
333 printk(FORE200E "device %s self-test passed\n", fore200e->name);
335 fore200e->state = FORE200E_STATE_RESET;
/* tear down the device: every case in the switch below intentionally falls
   through to the next, so resources are released in exact reverse order of
   the bring-up state machine, starting from whatever state was reached */
343 fore200e_shutdown(struct fore200e* fore200e)
345 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
346 fore200e->name, fore200e->phys_base,
347 fore200e_irq_itoa(fore200e->irq));
349 if (fore200e->state > FORE200E_STATE_RESET) {
350 /* first, reset the board to prevent further interrupts or data transfers */
351 fore200e_reset(fore200e, 0);
354 /* then, release all allocated resources */
355 switch(fore200e->state) {
357 case FORE200E_STATE_COMPLETE:
358 kfree(fore200e->stats);
/* fallthrough (intentional, here and below) */
360 case FORE200E_STATE_IRQ:
361 free_irq(fore200e->irq, fore200e->atm_dev);
363 case FORE200E_STATE_ALLOC_BUF:
364 fore200e_free_rx_buf(fore200e);
366 case FORE200E_STATE_INIT_BSQ:
367 fore200e_uninit_bs_queue(fore200e);
369 case FORE200E_STATE_INIT_RXQ:
370 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
371 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
373 case FORE200E_STATE_INIT_TXQ:
374 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
375 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
377 case FORE200E_STATE_INIT_CMDQ:
378 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
380 case FORE200E_STATE_INITIALIZE:
381 /* nothing to do for that state */
383 case FORE200E_STATE_START_FW:
384 /* nothing to do for that state */
386 case FORE200E_STATE_RESET:
387 /* nothing to do for that state */
389 case FORE200E_STATE_MAP:
390 fore200e->bus->unmap(fore200e);
392 case FORE200E_STATE_CONFIGURE:
393 /* nothing to do for that state */
395 case FORE200E_STATE_REGISTER:
396 /* XXX shouldn't we *start* by deregistering the device? */
397 atm_dev_deregister(fore200e->atm_dev);
399 case FORE200E_STATE_BLANK:
400 /* nothing to do for that state */
/* PCA-200E slave-RAM read accessor */
408 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
410 /* on big-endian hosts, the board is configured to convert
411 the endianness of slave RAM accesses, so the value read back
    is treated as little-endian here */
412 return le32_to_cpu(readl(addr));
/* PCA-200E slave-RAM write accessor (mirror of fore200e_pca_read) */
416 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
418 /* on big-endian hosts, the board is configured to convert
419 the endianness of slave RAM accesses, so write little-endian */
420 writel(cpu_to_le32(val), addr);
425 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
427 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
429 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
430 virt_addr, size, direction, dma_addr);
437 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
439 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
440 dma_addr, size, direction);
442 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
447 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
449 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
451 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
455 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
457 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
459 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
463 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
464 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
467 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
468 int size, int nbr, int alignment)
470 /* returned chunks are page-aligned */
471 chunk->alloc_size = size * nbr;
472 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
476 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
479 chunk->align_addr = chunk->alloc_addr;
485 /* free a DMA consistent chunk of memory */
488 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
490 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
498 fore200e_pca_irq_check(struct fore200e* fore200e)
500 /* this is a 1 bit register */
501 int irq_posted = readl(fore200e->regs.pca.psr);
503 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
504 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
505 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
514 fore200e_pca_irq_ack(struct fore200e* fore200e)
516 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
/* pulse the PCA host-control RESET bit; the elided line between the two
   writes is presumably a short delay so the reset is latched -- TODO confirm */
521 fore200e_pca_reset(struct fore200e* fore200e)
523 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
525 writel(0, fore200e->regs.pca.hcr);
530 fore200e_pca_map(struct fore200e* fore200e)
532 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
534 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
536 if (fore200e->virt_base == NULL) {
537 printk(FORE200E "can't map device %s\n", fore200e->name);
541 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
543 /* gain access to the PCA specific registers */
544 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
545 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
546 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
548 fore200e->state = FORE200E_STATE_MAP;
554 fore200e_pca_unmap(struct fore200e* fore200e)
556 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
558 if (fore200e->virt_base != NULL)
559 iounmap(fore200e->virt_base);
/* program the PCA-200E's PCI master-control byte and latency timer,
   and sanity-check the assigned IRQ */
564 fore200e_pca_configure(struct fore200e* fore200e)
566 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
567 u8 master_ctrl, latency;
569 DPRINTK(2, "device %s being configured\n", fore200e->name);
/* IRQ 0 / 0xFF means the BIOS or a bridge failed to route an interrupt */
571 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
572 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
/* read-modify-write: keep existing bits, OR in the desired modes */
576 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
578 master_ctrl = master_ctrl
579 #if defined(__BIG_ENDIAN)
580 /* request the PCA board to convert the endianness of slave RAM accesses */
581 | PCA200E_CTRL_CONVERT_ENDIAN
584 | PCA200E_CTRL_DIS_CACHE_RD
585 | PCA200E_CTRL_DIS_WRT_INVAL
586 | PCA200E_CTRL_ENA_CONT_REQ_MODE
587 | PCA200E_CTRL_2_CACHE_WRT_INVAL
589 | PCA200E_CTRL_LARGE_PCI_BURSTS;
591 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
593 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
594 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
595 this may impact the performances of other PCI devices on the same bus, though */
/* NOTE(review): the assignment of 'latency' (to 192 per the comment above)
   is on an elided line of this listing -- confirm against the full source */
597 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
599 fore200e->state = FORE200E_STATE_CONFIGURE;
605 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
607 struct host_cmdq* cmdq = &fore200e->host_cmdq;
608 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
609 struct prom_opcode opcode;
613 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
615 opcode.opcode = OPCODE_GET_PROM;
618 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
620 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
622 *entry->status = STATUS_PENDING;
624 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
626 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
628 *entry->status = STATUS_FREE;
630 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
633 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
637 #if defined(__BIG_ENDIAN)
639 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
641 /* MAC address is stored as little-endian */
642 swap_here(&prom->mac_addr[0]);
643 swap_here(&prom->mac_addr[4]);
651 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
653 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
655 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
656 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
659 #endif /* CONFIG_PCI */
665 fore200e_sba_read(volatile u32 __iomem *addr)
667 return sbus_readl(addr);
672 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
674 sbus_writel(val, addr);
679 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
681 struct sbus_dev *sdev = fore200e->bus_dev;
682 struct device *dev = &sdev->ofdev.dev;
683 u32 dma_addr = dma_map_single(dev, virt_addr, size, direction);
685 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
686 virt_addr, size, direction, dma_addr);
693 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
695 struct sbus_dev *sdev = fore200e->bus_dev;
696 struct device *dev = &sdev->ofdev.dev;
698 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
699 dma_addr, size, direction);
701 dma_unmap_single(dev, dma_addr, size, direction);
706 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
708 struct sbus_dev *sdev = fore200e->bus_dev;
709 struct device *dev = &sdev->ofdev.dev;
711 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
713 dma_sync_single_for_cpu(dev, dma_addr, size, direction);
717 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
719 struct sbus_dev *sdev = fore200e->bus_dev;
720 struct device *dev = &sdev->ofdev.dev;
722 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
724 dma_sync_single_for_device(dev, dma_addr, size, direction);
728 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
729 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
732 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
733 int size, int nbr, int alignment)
735 struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
736 struct device *dev = &sdev->ofdev.dev;
738 chunk->alloc_size = chunk->align_size = size * nbr;
740 /* returned chunks are page-aligned */
741 chunk->alloc_addr = dma_alloc_coherent(dev, chunk->alloc_size,
742 &chunk->dma_addr, GFP_ATOMIC);
744 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
747 chunk->align_addr = chunk->alloc_addr;
753 /* free a DVMA consistent chunk of memory */
756 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
758 struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev;
759 struct device *dev = &sdev->ofdev.dev;
761 dma_free_coherent(dev, chunk->alloc_size,
762 chunk->alloc_addr, chunk->dma_addr);
767 fore200e_sba_irq_enable(struct fore200e* fore200e)
769 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
770 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
775 fore200e_sba_irq_check(struct fore200e* fore200e)
777 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
782 fore200e_sba_irq_ack(struct fore200e* fore200e)
784 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
785 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
790 fore200e_sba_reset(struct fore200e* fore200e)
792 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
794 fore200e->bus->write(0, fore200e->regs.sba.hcr);
799 fore200e_sba_map(struct fore200e* fore200e)
801 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
804 /* gain access to the SBA specific registers */
805 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
806 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
807 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
808 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
810 if (fore200e->virt_base == NULL) {
811 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
815 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
817 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
819 /* get the supported DVMA burst sizes */
820 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
822 if (sbus_can_dma_64bit(sbus_dev))
823 sbus_set_sbus64(sbus_dev, bursts);
825 fore200e->state = FORE200E_STATE_MAP;
831 fore200e_sba_unmap(struct fore200e* fore200e)
833 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
834 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
835 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
836 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
841 fore200e_sba_configure(struct fore200e* fore200e)
843 fore200e->state = FORE200E_STATE_CONFIGURE;
848 static struct fore200e* __init
849 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
851 struct fore200e* fore200e;
852 struct sbus_bus* sbus_bus;
853 struct sbus_dev* sbus_dev = NULL;
855 unsigned int count = 0;
857 for_each_sbus (sbus_bus) {
858 for_each_sbusdev (sbus_dev, sbus_bus) {
859 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
869 if (sbus_dev->num_registers != 4) {
870 printk(FORE200E "this %s device has %d instead of 4 registers\n",
871 bus->model_name, sbus_dev->num_registers);
875 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
876 if (fore200e == NULL)
880 fore200e->bus_dev = sbus_dev;
881 fore200e->irq = sbus_dev->irqs[ 0 ];
883 fore200e->phys_base = (unsigned long)sbus_dev;
885 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
892 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
894 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
897 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
901 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
905 prom_getproperty(sbus_dev->prom_node, "serialnumber",
906 (char*)&prom->serial_number, sizeof(prom->serial_number));
908 prom_getproperty(sbus_dev->prom_node, "promversion",
909 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
916 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
918 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
920 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
922 #endif /* CONFIG_SBUS */
/* reap completed tx entries from the host tx queue tail: unmap their DMA
   buffers, account the skb back to the (still-open, same-incarnation) vcc,
   and recycle the queue slot. Caller is expected to hold q_lock. */
926 fore200e_tx_irq(struct fore200e* fore200e)
928 struct host_txq* txq = &fore200e->host_txq;
929 struct host_txq_entry* entry;
931 struct fore200e_vc_map* vc_map;
/* nothing in flight: fast exit */
933 if (fore200e->host_txq.txing == 0)
938 entry = &txq->host_entry[ txq->tail ];
/* stop at the first entry the adapter has not completed yet */
940 if ((*entry->status & STATUS_COMPLETE) == 0) {
944 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
945 entry, txq->tail, entry->vc_map, entry->skb);
947 /* free copy of misaligned data */
950 /* remove DMA mapping */
951 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
954 vc_map = entry->vc_map;
956 /* vcc closed since the time the entry was submitted for tx? */
957 if ((vc_map->vcc == NULL) ||
958 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
960 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
961 fore200e->atm_dev->number);
963 dev_kfree_skb_any(entry->skb);
968 /* vcc closed then immediately re-opened? */
969 if (vc_map->incarn != entry->incarn) {
971 /* when a vcc is closed, some PDUs may be still pending in the tx queue.
972 if the same vcc is immediately re-opened, those pending PDUs must
973 not be popped after the completion of their emission, as they refer
974 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
975 would be decremented by the size of the (unrelated) skb, possibly
976 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
977 we thus bind the tx entry to the current incarnation of the vcc
978 when the entry is submitted for tx. When the tx later completes,
979 if the incarnation number of the tx entry does not match the one
980 of the vcc, then this implies that the vcc has been closed then re-opened.
981 we thus just drop the skb here. */
983 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
984 fore200e->atm_dev->number);
986 dev_kfree_skb_any(entry->skb);
992 /* notify tx completion */
/* NOTE(review): 'vcc' is assigned from vc_map->vcc on an elided line of
   this listing; pop() is optional, hence the kfree fallback just below */
994 vcc->pop(vcc, entry->skb);
997 dev_kfree_skb_any(entry->skb);
1000 /* race fixed by the above incarnation mechanism, but... */
1001 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1002 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1005 /* check error condition */
1006 if (*entry->status & STATUS_ERROR)
1007 atomic_inc(&vcc->stats->tx_err);
1009 atomic_inc(&vcc->stats->tx);
/* recycle the slot and advance the circular tail index */
1013 *entry->status = STATUS_FREE;
1015 fore200e->host_txq.txing--;
1017 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1022 #ifdef FORE200E_BSQ_DEBUG
1023 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1025 struct buffer* buffer;
1028 buffer = bsq->freebuf;
1031 if (buffer->supplied) {
1032 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1033 where, scheme, magn, buffer->index);
1036 if (buffer->magn != magn) {
1037 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1038 where, scheme, magn, buffer->index, buffer->magn);
1041 if (buffer->scheme != scheme) {
1042 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1043 where, scheme, magn, buffer->index, buffer->scheme);
1046 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1047 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1048 where, scheme, magn, buffer->index);
1052 buffer = buffer->next;
1055 if (count != bsq->freebuf_count) {
1056 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1057 where, scheme, magn, count, bsq->freebuf_count);
/* replenish the adapter's rx buffer pools: for each (scheme, magnitude)
   queue, hand free host buffers to the board in blocks of RBD_BLK_SIZE */
1065 fore200e_supply(struct fore200e* fore200e)
1067 int scheme, magn, i;
1069 struct host_bsq* bsq;
1070 struct host_bsq_entry* entry;
1071 struct buffer* buffer;
1073 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1074 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1076 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1078 #ifdef FORE200E_BSQ_DEBUG
1079 bsq_audit(1, bsq, scheme, magn);
/* only supply whole RBD blocks; leftovers stay in the free list */
1081 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1083 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1084 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1086 entry = &bsq->host_entry[ bsq->head ];
1088 for (i = 0; i < RBD_BLK_SIZE; i++) {
1090 /* take the first buffer in the free buffer list */
1091 buffer = bsq->freebuf;
/* reached only when the free list is shorter than freebuf_count claims */
1093 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1094 scheme, magn, bsq->freebuf_count);
1097 bsq->freebuf = buffer->next;
1099 #ifdef FORE200E_BSQ_DEBUG
1100 if (buffer->supplied)
1101 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1102 scheme, magn, buffer->index);
1103 buffer->supplied = 1;
/* fill the i-th receive buffer descriptor of this block */
1105 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1106 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1109 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1111 /* decrease accordingly the number of free rx buffers */
1112 bsq->freebuf_count -= RBD_BLK_SIZE;
/* publish the block to the adapter: mark pending, then write its
   DMA address into the corresponding cp queue entry */
1114 *entry->status = STATUS_PENDING;
1115 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
/* reassemble a received PDU from its rpd segments into a fresh skb and push
   it up to the given vcc; for 52-byte AAL0 SDUs the 4-byte cell header is
   reconstructed and prepended so atmdump-like apps see whole cells */
1123 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1125 struct sk_buff* skb;
1126 struct buffer* buffer;
1127 struct fore200e_vcc* fore200e_vcc;
1129 #ifdef FORE200E_52BYTE_AAL0_SDU
1130 u32 cell_header = 0;
1135 fore200e_vcc = FORE200E_VCC(vcc);
1136 ASSERT(fore200e_vcc);
1138 #ifdef FORE200E_52BYTE_AAL0_SDU
1139 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
/* rebuild the raw ATM cell header from its parsed fields */
1141 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1142 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1143 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1144 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1145 rpd->atm_header.clp;
1150 /* compute total PDU length */
/* NOTE(review): 'i' and 'pdu_len' are declared/initialized on elided lines */
1151 for (i = 0; i < rpd->nseg; i++)
1152 pdu_len += rpd->rsd[ i ].length;
1154 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1156 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1158 atomic_inc(&vcc->stats->rx_drop);
1162 __net_timestamp(skb);
1164 #ifdef FORE200E_52BYTE_AAL0_SDU
/* prepend the reconstructed 4-byte cell header (AAL0 path only) */
1166 *((u32*)skb_put(skb, 4)) = cell_header;
1170 /* reassemble segments */
1171 for (i = 0; i < rpd->nseg; i++) {
1173 /* rebuild rx buffer address from rsd handle */
1174 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1176 /* Make device DMA transfer visible to CPU. */
1177 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1179 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1181 /* Now let the device get at it again. */
1182 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1185 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
/* per-vcc rx statistics */
1187 if (pdu_len < fore200e_vcc->rx_min_pdu)
1188 fore200e_vcc->rx_min_pdu = pdu_len;
1189 if (pdu_len > fore200e_vcc->rx_max_pdu)
1190 fore200e_vcc->rx_max_pdu = pdu_len;
1191 fore200e_vcc->rx_pdu++;
/* apply the socket receive-buffer quota; drop the PDU when exceeded */
1194 if (atm_charge(vcc, skb->truesize) == 0) {
1196 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1197 vcc->itf, vcc->vpi, vcc->vci);
1199 dev_kfree_skb_any(skb);
1201 atomic_inc(&vcc->stats->rx_drop);
1205 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1207 vcc->push(vcc, skb);
1208 atomic_inc(&vcc->stats->rx);
1210 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
/* return every rx buffer referenced by an rpd to its queue's free list
   so fore200e_supply() can hand it back to the adapter */
1217 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1219 struct host_bsq* bsq;
1220 struct buffer* buffer;
1223 for (i = 0; i < rpd->nseg; i++) {
1225 /* rebuild rx buffer address from rsd handle */
1226 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1228 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1230 #ifdef FORE200E_BSQ_DEBUG
1231 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1233 if (buffer->supplied == 0)
1234 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1235 buffer->scheme, buffer->magn, buffer->index);
1236 buffer->supplied = 0;
1239 /* re-insert the buffer into the free buffer list */
1240 buffer->next = bsq->freebuf;
1241 bsq->freebuf = buffer;
1243 /* then increment the number of free rx buffers */
1244 bsq->freebuf_count++;
/* drain completed entries from the host rx queue head: dispatch each PDU to
   its vcc (when open and error-free), recycle its buffers, ack the rpd back
   to the adapter, and top the buffer pools back up. Caller holds q_lock. */
1250 fore200e_rx_irq(struct fore200e* fore200e)
1252 struct host_rxq* rxq = &fore200e->host_rxq;
1253 struct host_rxq_entry* entry;
1254 struct atm_vcc* vcc;
1255 struct fore200e_vc_map* vc_map;
1259 entry = &rxq->host_entry[ rxq->head ];
1261 /* no more received PDUs */
1262 if ((*entry->status & STATUS_COMPLETE) == 0)
/* locate the vc this PDU belongs to from its vpi/vci */
1265 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1267 if ((vc_map->vcc == NULL) ||
1268 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1270 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1271 fore200e->atm_dev->number,
1272 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
/* NOTE(review): 'vcc' is assigned from vc_map->vcc on an elided line */
1278 if ((*entry->status & STATUS_ERROR) == 0) {
1280 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1283 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1284 fore200e->atm_dev->number,
1285 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1286 atomic_inc(&vcc->stats->rx_err);
1290 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
/* hand the segment buffers back to the free lists... */
1292 fore200e_collect_rpd(fore200e, entry->rpd);
1294 /* rewrite the rpd address to ack the received PDU */
1295 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1296 *entry->status = STATUS_FREE;
/* ...and resupply the adapter with fresh rx buffers */
1298 fore200e_supply(fore200e);
1303 #ifndef FORE200E_USE_TASKLET
/* Inline (non-tasklet) interrupt work: run the rx then the tx queue
   service, each under the queue spinlock with interrupts disabled. */
1305 fore200e_irq(struct fore200e* fore200e)
1307 unsigned long flags;
1309 spin_lock_irqsave(&fore200e->q_lock, flags);
1310 fore200e_rx_irq(fore200e);
1311 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* lock is dropped and retaken between rx and tx to bound irq-off time */
1313 spin_lock_irqsave(&fore200e->q_lock, flags);
1314 fore200e_tx_irq(fore200e);
1315 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Shared interrupt handler. 'dev' is the struct atm_dev* registered with
   request_irq(). Bails out if this board did not raise the interrupt;
   otherwise defers the work to tasklets (or runs it inline) and acks
   the interrupt on the board. */
1321 fore200e_interrupt(int irq, void* dev)
1323 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1325 if (fore200e->bus->irq_check(fore200e) == 0) {
1327 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1330 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1332 #ifdef FORE200E_USE_TASKLET
1333 tasklet_schedule(&fore200e->tx_tasklet);
1334 tasklet_schedule(&fore200e->rx_tasklet);
1336 fore200e_irq(fore200e);
/* ack only after the work has been queued or done */
1339 fore200e->bus->irq_ack(fore200e);
1344 #ifdef FORE200E_USE_TASKLET
/* Tx tasklet body: reclaim completed transmit queue entries under q_lock.
   'data' is the struct fore200e* passed at tasklet_init() time. */
1346 fore200e_tx_tasklet(unsigned long data)
1348 struct fore200e* fore200e = (struct fore200e*) data;
1349 unsigned long flags;
1351 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1353 spin_lock_irqsave(&fore200e->q_lock, flags);
1354 fore200e_tx_irq(fore200e);
1355 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Rx tasklet body: service the receive queue under q_lock.
   'data' is the struct fore200e* passed at tasklet_init() time. */
1360 fore200e_rx_tasklet(unsigned long data)
1362 struct fore200e* fore200e = (struct fore200e*) data;
1363 unsigned long flags;
1365 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1367 spin_lock_irqsave(&fore200e->q_lock, flags);
/* note: the cast below is redundant -- 'data' is already held in 'fore200e' */
1368 fore200e_rx_irq((struct fore200e*) data);
1369 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Pick a rx buffer scheme for a new VC: odd VCIs get scheme one, even
   VCIs scheme two, spreading VCs evenly over the two identical schemes. */
1375 fore200e_select_scheme(struct atm_vcc* vcc)
1377 /* fairly balance the VCs over (identical) buffer schemes */
1378 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1380 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1381 vcc->itf, vcc->vpi, vcc->vci, scheme);
/* Issue an ACTIVATE_VCIN or DEACTIVATE_VCIN command to the cp for 'vcc'.
   'activate' selects the opcode; 'mtu' is only used by the cp for AAL0.
   Busy-polls the command status (400 iterations of fore200e_poll) and
   prints an error if the cp does not complete in time. */
1388 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1390 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1391 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1392 struct activate_opcode activ_opcode;
1393 struct deactivate_opcode deactiv_opcode;
1396 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1398 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
/* choose and record the buffer scheme for this VC before activation */
1401 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1403 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1404 activ_opcode.aal = aal;
1405 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1406 activ_opcode.pad = 0;
1409 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1410 deactiv_opcode.pad = 0;
1413 vpvc.vci = vcc->vci;
1414 vpvc.vpi = vcc->vpi;
1416 *entry->status = STATUS_PENDING;
1420 #ifdef FORE200E_52BYTE_AAL0_SDU
1423 /* the MTU is not used by the cp, except in the case of AAL0 */
1424 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
/* vpvc first, then the opcode word, whose write triggers the cp */
1425 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1426 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1429 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1430 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
/* wait for command completion, then release the queue entry */
1433 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1435 *entry->status = STATUS_FREE;
1438 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1439 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1443 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1444 activate ? "open" : "clos");
1450 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
/* Derive tx rate control parameters from the requested QoS: express the
   PCR as a data-cells / idle-cells split of every
   FORE200E_MAX_BACK2BACK_CELLS cell slots. At full OC-3 rate or above,
   rate control is disabled (both counts zero). */
1453 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1455 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1457 /* compute the data cells to idle cells ratio from the tx PCR */
1458 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1459 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1462 /* disable rate control */
1463 rate->data_cells = rate->idle_cells = 0;
/* atmdev_ops .open handler: validate vpi/vci, allocate per-VC state,
   reserve CBR bandwidth if requested, activate the VC on the cp, set up
   rate control and statistics, then mark the VC ready.
   NOTE(review): listing is elided; error-return statements between the
   visible lines are not shown. */
1469 fore200e_open(struct atm_vcc *vcc)
1471 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1472 struct fore200e_vcc* fore200e_vcc;
1473 struct fore200e_vc_map* vc_map;
1474 unsigned long flags;
1476 short vpi = vcc->vpi;
1478 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1479 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
/* check under q_lock that the (vpi,vci) slot is not already in use */
1481 spin_lock_irqsave(&fore200e->q_lock, flags);
1483 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1486 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1488 printk(FORE200E "VC %d.%d.%d already in use\n",
1489 fore200e->atm_dev->number, vpi, vci);
1496 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* GFP_ATOMIC: presumably because sleeping is not allowed here -- confirm */
1498 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1499 if (fore200e_vcc == NULL) {
1504 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1505 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1506 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1507 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1508 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1509 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1510 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1512 /* pseudo-CBR bandwidth requested? */
1513 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1515 mutex_lock(&fore200e->rate_mtx);
1516 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1517 mutex_unlock(&fore200e->rate_mtx);
1519 kfree(fore200e_vcc);
1524 /* reserve bandwidth */
1525 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1526 mutex_unlock(&fore200e->rate_mtx);
1529 vcc->itf = vcc->dev->number;
1531 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1532 set_bit(ATM_VF_ADDR, &vcc->flags);
1534 vcc->dev_data = fore200e_vcc;
/* activate the VC on the cp; on failure roll everything back */
1536 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1540 clear_bit(ATM_VF_ADDR, &vcc->flags);
1541 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1543 vcc->dev_data = NULL;
/* return the bandwidth reserved above */
1545 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1547 kfree(fore200e_vcc);
1551 /* compute rate control parameters */
1552 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1554 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1555 set_bit(ATM_VF_HASQOS, &vcc->flags);
1557 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1558 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1559 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1560 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
/* reset min/max/count PDU statistics for the new VC */
1563 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1564 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1565 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1567 /* new incarnation of the vcc */
1568 vc_map->incarn = ++fore200e->incarn_count;
1570 /* VC unusable before this flag is set */
1571 set_bit(ATM_VF_READY, &vcc->flags);
/* atmdev_ops .close handler: mark the VC not-ready, deactivate it on the
   cp, detach it from the vc map under q_lock, release any reserved CBR
   bandwidth, and free the per-VC state. */
1578 fore200e_close(struct atm_vcc* vcc)
1580 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1581 struct fore200e_vcc* fore200e_vcc;
1582 struct fore200e_vc_map* vc_map;
1583 unsigned long flags;
1586 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1587 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1589 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
/* stop rx delivery and new tx before tearing the VC down */
1591 clear_bit(ATM_VF_READY, &vcc->flags);
1593 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1595 spin_lock_irqsave(&fore200e->q_lock, flags);
1597 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1599 /* the vc is no longer considered as "in use" by fore200e_open() */
1602 vcc->itf = vcc->vci = vcc->vpi = 0;
/* detach the per-VC state while still under the lock */
1604 fore200e_vcc = FORE200E_VCC(vcc);
1605 vcc->dev_data = NULL;
1607 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1609 /* release reserved bandwidth, if any */
1610 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1612 mutex_lock(&fore200e->rate_mtx);
1613 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1614 mutex_unlock(&fore200e->rate_mtx);
1616 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1619 clear_bit(ATM_VF_ADDR, &vcc->flags);
1620 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1622 ASSERT(fore200e_vcc);
1623 kfree(fore200e_vcc);
/* atmdev_ops .send handler: copy/align the skb payload into a DMA-able
   buffer when necessary (misalignment, AAL0 padding, 52-byte AAL0 cell
   header stripping), claim a tx queue entry under q_lock, fill the
   transmit PDU descriptor (tpd) and kick the cp by writing the tpd
   handle into the cp resident tx entry.
   FIX(review): the "not ready" DPRINTK printed vcc->vpi twice; the third
   argument of the "%d.%d.%d" (itf.vpi.vci) format must be vcc->vci, as
   in every other trace in this file. */
1628 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1630 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1631 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1632 struct fore200e_vc_map* vc_map;
1633 struct host_txq* txq = &fore200e->host_txq;
1634 struct host_txq_entry* entry;
1636 struct tpd_haddr tpd_haddr;
1637 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1639 int tx_len = skb->len;
1640 u32* cell_header = NULL;
1641 unsigned char* skb_data;
1643 unsigned char* data;
1644 unsigned long flags;
1647 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1649 ASSERT(fore200e_vcc);
1651 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1652 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1653 dev_kfree_skb_any(skb);
1657 #ifdef FORE200E_52BYTE_AAL0_SDU
/* 52-byte AAL0 SDUs carry a 4-byte cell header built by the application */
1658 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1659 cell_header = (u32*) skb->data;
1660 skb_data = skb->data + 4; /* skip 4-byte cell header */
1661 skb_len = tx_len = skb->len - 4;
1663 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1668 skb_data = skb->data;
/* the board requires 32-bit aligned tx buffers */
1672 if (((unsigned long)skb_data) & 0x3) {
1674 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
/* AAL0 PDUs must be a whole number of cells; pad up to cell boundary */
1679 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1681 /* this simply NUKES the PCA board */
1682 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1684 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1688 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1694 dev_kfree_skb_any(skb);
/* copy the payload and zero-fill any padding */
1699 memcpy(data, skb_data, skb_len);
1700 if (skb_len < tx_len)
1701 memset(data + skb_len, 0x00, tx_len - skb_len);
1707 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1708 ASSERT(vc_map->vcc == vcc);
1712 spin_lock_irqsave(&fore200e->q_lock, flags);
1714 entry = &txq->host_entry[ txq->head ];
/* queue full (or nearly): reclaim completed entries, then maybe retry */
1716 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1718 /* try to free completed tx queue entries */
1719 fore200e_tx_irq(fore200e);
1721 if (*entry->status != STATUS_FREE) {
1723 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1725 /* retry once again? */
1731 atomic_inc(&vcc->stats->tx_err);
1734 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1735 fore200e->name, fore200e->cp_queues->heartbeat);
1740 dev_kfree_skb_any(skb);
/* record which VC incarnation owns this entry, for safe completion */
1750 entry->incarn = vc_map->incarn;
1751 entry->vc_map = vc_map;
1753 entry->data = tx_copy ? data : NULL;
1756 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1757 tpd->tsd[ 0 ].length = tx_len;
1759 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1762 /* The dma_map call above implies a dma_sync so the device can use it,
1763 * thus no explicit dma_sync call is necessary here.
1766 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1767 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1768 tpd->tsd[0].length, skb_len);
/* per-VC PDU size statistics */
1770 if (skb_len < fore200e_vcc->tx_min_pdu)
1771 fore200e_vcc->tx_min_pdu = skb_len;
1772 if (skb_len > fore200e_vcc->tx_max_pdu)
1773 fore200e_vcc->tx_max_pdu = skb_len;
1774 fore200e_vcc->tx_pdu++;
1776 /* set tx rate control information */
1777 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1778 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
/* 52-byte AAL0: unpack the user-supplied cell header into the tpd */
1781 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1782 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1783 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1784 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1785 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1788 /* set the ATM header, common to all cells conveying the PDU */
1789 tpd->atm_header.clp = 0;
1790 tpd->atm_header.plt = 0;
1791 tpd->atm_header.vci = vcc->vci;
1792 tpd->atm_header.vpi = vcc->vpi;
1793 tpd->atm_header.gfc = 0;
1796 tpd->spec.length = tx_len;
1798 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1801 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1803 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
/* writing the tpd handle into the cp entry hands the PDU to the board */
1805 *entry->status = STATUS_PENDING;
1806 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1808 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Ask the cp for its statistics block: lazily allocate the DMA-able
   stats buffer, map it, submit a GET_STATS command and poll for
   completion; the result lands in fore200e->stats. */
1815 fore200e_getstats(struct fore200e* fore200e)
1817 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1818 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1819 struct stats_opcode opcode;
/* allocate the stats buffer once, on first use */
1823 if (fore200e->stats == NULL) {
1824 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1825 if (fore200e->stats == NULL)
1829 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1830 sizeof(struct stats), DMA_FROM_DEVICE);
1832 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1834 opcode.opcode = OPCODE_GET_STATS;
/* give the cp the DMA address to fill, then trigger with the opcode */
1837 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1839 *entry->status = STATUS_PENDING;
1841 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1843 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1845 *entry->status = STATUS_FREE;
1847 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1850 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
/* atmdev_ops .getsockopt handler: no device-level options are
   implemented; only traces the request. */
1859 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1861 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1863 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1864 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
/* atmdev_ops .setsockopt handler: no device-level options are
   implemented; only traces the request. */
1871 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1873 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1875 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1876 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1882 #if 0 /* currently unused */
/* Read the SUNI OC-3 registers from the cp via a GET_OC3 command.
   Compiled out; NOTE(review): if re-enabled, the opcode write below
   lacks the __iomem qualifier used by the live set_oc3 variant. */
1884 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1886 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1887 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1888 struct oc3_opcode opcode;
1890 u32 oc3_regs_dma_addr;
1892 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1894 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1896 opcode.opcode = OPCODE_GET_OC3;
/* give the cp the DMA address to fill, then trigger with the opcode */
1901 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1903 *entry->status = STATUS_PENDING;
1905 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1907 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1909 *entry->status = STATUS_FREE;
1911 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1914 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
/* Write a SUNI OC-3 register through the cp: submit a SET_OC3 command
   with register index, value and bit mask, and poll for completion. */
1924 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1926 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1927 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1928 struct oc3_opcode opcode;
1931 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1933 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1935 opcode.opcode = OPCODE_SET_OC3;
1937 opcode.value = value;
/* no result buffer for a set operation */
1940 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1942 *entry->status = STATUS_PENDING;
/* the opcode write triggers the cp */
1944 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1946 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1948 *entry->status = STATUS_FREE;
1951 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
/* Set the SONET loopback mode via the SUNI master control register.
   Requires CAP_NET_ADMIN. On success, records the new mode in
   fore200e->loop_mode. */
1960 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1962 u32 mct_value, mct_mask;
1965 if (!capable(CAP_NET_ADMIN))
/* translate the ATM loopback mode into SUNI DLE/LLE bits */
1968 switch (loop_mode) {
1972 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1975 case ATM_LM_LOC_PHY:
1976 mct_value = mct_mask = SUNI_MCT_DLE;
1979 case ATM_LM_RMT_PHY:
1980 mct_value = mct_mask = SUNI_MCT_LLE;
1987 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1989 fore200e->loop_mode = loop_mode;
/* SONET_GETSTAT backend: refresh the cp statistics, convert the
   big-endian on-board counters into a struct sonet_stats, and copy it
   to the user buffer 'arg'. Returns -EFAULT on copy failure. */
1996 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1998 struct sonet_stats tmp;
2000 if (fore200e_getstats(fore200e) < 0)
/* cp counters are stored big-endian; convert each field */
2003 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
2004 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
2005 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
2006 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
2007 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
2008 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
2009 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
/* cell counts are summed over all three AAL types */
2010 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
2011 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
2012 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
2013 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
2014 be32_to_cpu(fore200e->stats->aal34.cells_received) +
2015 be32_to_cpu(fore200e->stats->aal5.cells_received);
2018 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
/* atmdev_ops .ioctl handler: dispatch the SONET statistics and loopback
   ioctls; everything else returns -ENOSYS. */
2025 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2027 struct fore200e* fore200e = FORE200E_DEV(dev);
2029 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2034 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
/* stats cannot be zeroed on this hardware: report success, do nothing */
2037 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
/* for setloop, the loop mode is passed by value in 'arg' */
2040 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2043 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2046 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2049 return -ENOSYS; /* not implemented */
/* atmdev_ops .change_qos handler: for CBR VCs, atomically trade the old
   reservation for the new one if enough cell rate is available, then
   install the new QoS and recompute rate control parameters.
   FIX(review): the "not ready" DPRINTK printed vcc->vpi twice; the third
   argument of the "%d.%d.%d" (itf.vpi.vci) format must be vcc->vci, as
   in every other trace in this file. */
2054 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2056 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2057 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2059 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2060 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2064 DPRINTK(2, "change_qos %d.%d.%d, "
2065 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2066 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2067 "available_cell_rate = %u",
2068 vcc->itf, vcc->vpi, vcc->vci,
2069 fore200e_traffic_class[ qos->txtp.traffic_class ],
2070 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2071 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2072 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2073 flags, fore200e->available_cell_rate);
2075 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2077 mutex_lock(&fore200e->rate_mtx);
/* check that (old reservation + free rate) covers the new request */
2078 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2079 mutex_unlock(&fore200e->rate_mtx);
/* swap the old reservation for the new one */
2083 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2084 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2086 mutex_unlock(&fore200e->rate_mtx);
2088 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2090 /* update rate control parameters */
2091 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2093 set_bit(ATM_VF_HASQOS, &vcc->flags);
2102 static int __devinit
/* Reserve the board's (shared) IRQ line, initialize the deferred-work
   tasklets when enabled, and advance the probe state machine. */
2103 fore200e_irq_request(struct fore200e* fore200e)
2105 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2107 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2108 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2112 printk(FORE200E "IRQ %s reserved for device %s\n",
2113 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2115 #ifdef FORE200E_USE_TASKLET
2116 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2117 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2120 fore200e->state = FORE200E_STATE_IRQ;
2125 static int __devinit
/* Read the board PROM and extract revision, serial number and the
   6-byte ESI (end system identifier, at PROM mac_addr offset 2). */
2126 fore200e_get_esi(struct fore200e* fore200e)
2128 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2134 ok = fore200e->bus->prom_read(fore200e, prom);
2140 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2142 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2143 prom->serial_number & 0xFFFF,
2144 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2145 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
/* publish the ESI both in the driver and in the ATM device */
2147 for (i = 0; i < ESI_LEN; i++) {
2148 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2157 static int __devinit
/* Allocate the rx buffer pools: for every (scheme, magnitude) pair,
   allocate the buffer descriptor array plus one DMA-able data chunk per
   buffer, and thread all buffers onto the queue's free list. On a chunk
   allocation failure, previously allocated chunks are freed. */
2158 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2160 int scheme, magn, nbr, size, i;
2162 struct host_bsq* bsq;
2163 struct buffer* buffer;
2165 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2166 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2168 bsq = &fore200e->host_bsq[ scheme ][ magn ];
/* per-queue buffer count and size come from the static config tables */
2170 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2171 size = fore200e_rx_buf_size[ scheme ][ magn ];
2173 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2175 /* allocate the array of receive buffers */
2176 buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2181 bsq->freebuf = NULL;
2183 for (i = 0; i < nbr; i++) {
2185 buffer[ i ].scheme = scheme;
2186 buffer[ i ].magn = magn;
2187 #ifdef FORE200E_BSQ_DEBUG
2188 buffer[ i ].index = i;
2189 buffer[ i ].supplied = 0;
2192 /* allocate the receive buffer body */
2193 if (fore200e_chunk_alloc(fore200e,
2194 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2195 DMA_FROM_DEVICE) < 0) {
/* unwind: free the chunks allocated so far in this queue */
2198 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2204 /* insert the buffer into the free buffer list */
2205 buffer[ i ].next = bsq->freebuf;
2206 bsq->freebuf = &buffer[ i ];
2208 /* all the buffers are free, initially */
2209 bsq->freebuf_count = nbr;
2211 #ifdef FORE200E_BSQ_DEBUG
2212 bsq_audit(3, bsq, scheme, magn);
/* advance the probe state machine */
2217 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2222 static int __devinit
/* Initialize the buffer supply queues: for every (scheme, magnitude)
   pair, allocate the aligned status-word and rbd-block arrays, then
   wire each host entry to its cp resident counterpart and publish the
   status word DMA addresses to the cp. */
2223 fore200e_init_bs_queue(struct fore200e* fore200e)
2225 int scheme, magn, i;
2227 struct host_bsq* bsq;
2228 struct cp_bsq_entry __iomem * cp_entry;
2230 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2231 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2233 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2235 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2237 /* allocate and align the array of status words */
2238 if (fore200e->bus->dma_chunk_alloc(fore200e,
2240 sizeof(enum status),
2242 fore200e->bus->status_alignment) < 0) {
2246 /* allocate and align the array of receive buffer descriptors */
2247 if (fore200e->bus->dma_chunk_alloc(fore200e,
2249 sizeof(struct rbd_block),
2251 fore200e->bus->descr_alignment) < 0) {
/* unwind the status array allocated just above */
2253 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2257 /* get the base address of the cp resident buffer supply queue entries */
2258 cp_entry = fore200e->virt_base +
2259 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2261 /* fill the host resident and cp resident buffer supply queue entries */
2262 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2264 bsq->host_entry[ i ].status =
2265 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2266 bsq->host_entry[ i ].rbd_block =
2267 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2268 bsq->host_entry[ i ].rbd_block_dma =
2269 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2270 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2272 *bsq->host_entry[ i ].status = STATUS_FREE;
/* tell the cp where each entry's status word lives in host memory */
2274 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2275 &cp_entry[ i ].status_haddr);
2280 fore200e->state = FORE200E_STATE_INIT_BSQ;
2285 static int __devinit
/* Initialize the receive queue: allocate the aligned status-word and
   rpd arrays, wire each host entry to its cp resident counterpart, and
   publish the status and rpd DMA addresses to the cp. */
2286 fore200e_init_rx_queue(struct fore200e* fore200e)
2288 struct host_rxq* rxq = &fore200e->host_rxq;
2289 struct cp_rxq_entry __iomem * cp_entry;
2292 DPRINTK(2, "receive queue is being initialized\n");
2294 /* allocate and align the array of status words */
2295 if (fore200e->bus->dma_chunk_alloc(fore200e,
2297 sizeof(enum status),
2299 fore200e->bus->status_alignment) < 0) {
2303 /* allocate and align the array of receive PDU descriptors */
2304 if (fore200e->bus->dma_chunk_alloc(fore200e,
2308 fore200e->bus->descr_alignment) < 0) {
/* unwind the status array allocated just above */
2310 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2314 /* get the base address of the cp resident rx queue entries */
2315 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2317 /* fill the host resident and cp resident rx entries */
2318 for (i=0; i < QUEUE_SIZE_RX; i++) {
2320 rxq->host_entry[ i ].status =
2321 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2322 rxq->host_entry[ i ].rpd =
2323 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2324 rxq->host_entry[ i ].rpd_dma =
2325 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2326 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2328 *rxq->host_entry[ i ].status = STATUS_FREE;
/* hand the cp the DMA addresses of each entry's status word and rpd */
2330 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2331 &cp_entry[ i ].status_haddr);
2333 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2334 &cp_entry[ i ].rpd_haddr);
2337 /* set the head entry of the queue */
2340 fore200e->state = FORE200E_STATE_INIT_RXQ;
2345 static int __devinit
/* Initialize the transmit queue: allocate the aligned status-word and
   tpd arrays and wire each host entry to its cp resident counterpart.
   Unlike the rx queue, the tpd DMA addresses are NOT pre-published --
   writing a tpd address is what signals a new PDU to the cp. */
2346 fore200e_init_tx_queue(struct fore200e* fore200e)
2348 struct host_txq* txq = &fore200e->host_txq;
2349 struct cp_txq_entry __iomem * cp_entry;
2352 DPRINTK(2, "transmit queue is being initialized\n");
2354 /* allocate and align the array of status words */
2355 if (fore200e->bus->dma_chunk_alloc(fore200e,
2357 sizeof(enum status),
2359 fore200e->bus->status_alignment) < 0) {
2363 /* allocate and align the array of transmit PDU descriptors */
2364 if (fore200e->bus->dma_chunk_alloc(fore200e,
2368 fore200e->bus->descr_alignment) < 0) {
/* unwind the status array allocated just above */
2370 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2374 /* get the base address of the cp resident tx queue entries */
2375 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2377 /* fill the host resident and cp resident tx entries */
2378 for (i=0; i < QUEUE_SIZE_TX; i++) {
2380 txq->host_entry[ i ].status =
2381 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2382 txq->host_entry[ i ].tpd =
2383 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2384 txq->host_entry[ i ].tpd_dma =
2385 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2386 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2388 *txq->host_entry[ i ].status = STATUS_FREE;
2390 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2391 &cp_entry[ i ].status_haddr);
2393 /* although there is a one-to-one mapping of tx queue entries and tpds,
2394 we do not write here the DMA (physical) base address of each tpd into
2395 the related cp resident entry, because the cp relies on this write
2396 operation to detect that a new pdu has been submitted for tx */
2399 /* set the head and tail entries of the queue */
2403 fore200e->state = FORE200E_STATE_INIT_TXQ;
2408 static int __devinit
/* Initialize the command queue: allocate the aligned status-word array,
   wire each host entry to its cp resident counterpart, and publish the
   status word DMA addresses to the cp. */
2409 fore200e_init_cmd_queue(struct fore200e* fore200e)
2411 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2412 struct cp_cmdq_entry __iomem * cp_entry;
2415 DPRINTK(2, "command queue is being initialized\n");
2417 /* allocate and align the array of status words */
2418 if (fore200e->bus->dma_chunk_alloc(fore200e,
2420 sizeof(enum status),
2422 fore200e->bus->status_alignment) < 0) {
2426 /* get the base address of the cp resident cmd queue entries */
2427 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2429 /* fill the host resident and cp resident cmd entries */
2430 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2432 cmdq->host_entry[ i ].status =
2433 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2434 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2436 *cmdq->host_entry[ i ].status = STATUS_FREE;
2438 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2439 &cp_entry[ i ].status_haddr);
2442 /* set the head entry of the queue */
2445 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2450 static void __devinit
/* Write the parameters of one buffer supply queue (scheme, magnitude)
   into the cp's initialization block: queue length, buffer size (from
   the static config table), pool size and supply block size. */
2451 fore200e_param_bs_queue(struct fore200e* fore200e,
2452 enum buffer_scheme scheme, enum buffer_magn magn,
2453 int queue_length, int pool_size, int supply_blksize)
2455 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2457 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2458 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2459 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2460 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2464 static int __devinit
/* Send the INITIALIZE command to the cp: set up host locks, enable cp
   interrupts, fill the cp init block (queue sizes, descriptor
   extensions, buffer supply queue parameters), then issue the opcode
   and poll (up to 3000 iterations) for completion. */
2465 fore200e_initialize(struct fore200e* fore200e)
2467 struct cp_queues __iomem * cpq;
2468 int ok, scheme, magn;
2470 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2472 mutex_init(&fore200e->rate_mtx);
2473 spin_lock_init(&fore200e->q_lock);
2475 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2477 /* enable cp to host interrupts */
2478 fore200e->bus->write(1, &cpq->imask);
2480 if (fore200e->bus->irq_enable)
2481 fore200e->bus->irq_enable(fore200e);
2483 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
/* advertise the host queue sizes to the cp */
2485 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2486 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2487 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2489 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2490 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
/* describe every buffer supply queue in the init block */
2492 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2493 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2494 fore200e_param_bs_queue(fore200e, scheme, magn,
2496 fore200e_rx_buf_nbr[ scheme ][ magn ],
2499 /* issue the initialize command */
2500 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2501 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2503 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2505 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2509 printk(FORE200E "device %s initialized\n", fore200e->name);
2511 fore200e->state = FORE200E_STATE_INITIALIZE;
2516 static void __devinit
/* Send one character to the i960 monitor's soft UART: the AVAIL flag
   ORed into the word tells the monitor a character is present. */
2517 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2519 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2524 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2528 static int __devinit
/* Poll the i960 monitor's soft UART for up to 50 ms: if a character is
   available, consume it (marking the slot free), echo it to the console
   and (per the visible code) return it; times out otherwise. */
2529 fore200e_monitor_getc(struct fore200e* fore200e)
2531 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2532 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2535 while (time_before(jiffies, timeout)) {
2537 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2539 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
/* ack the character so the monitor can send the next one */
2541 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
/* echo monitor output to the kernel log */
2543 printk("%c", c & 0xFF);
/*
 * Send a command string to the i960 monitor, one character at a time,
 * draining any pending monitor output before each character and after the
 * whole string (the getc loops echo that output to the console).
 */
static void __devinit
fore200e_monitor_puts(struct fore200e* fore200e, char* str)
    /* the i960 monitor doesn't accept any new character if it has something to say */
    while (fore200e_monitor_getc(fore200e) >= 0);
    fore200e_monitor_putc(fore200e, *str++);
    while (fore200e_monitor_getc(fore200e) >= 0);
/* Firmware image name suffix: little-endian hosts load the plain image,
 * big-endian hosts load the "_ecd" (byte-swapped) image.  The selection
 * uses the kernel's __LITTLE_ENDIAN macro from <asm/byteorder.h>. */
#ifdef __LITTLE_ENDIAN
#define FW_EXT ".bin"
#define FW_EXT "_ecd.bin2"
2573 static int __devinit
2574 fore200e_load_and_start_fw(struct fore200e* fore200e)
2576 const struct firmware *firmware;
2577 struct device *device;
2578 struct fw_header *fw_header;
2579 const __le32 *fw_data;
2581 u32 __iomem *load_addr;
2585 if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2586 device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2588 else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2589 device = &((struct sbus_dev *) fore200e->bus_dev)->ofdev.dev;
2594 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2595 if (request_firmware(&firmware, buf, device) == 1) {
2596 printk(FORE200E "missing %s firmware image\n", fore200e->bus->model_name);
2600 fw_data = (__le32 *) firmware->data;
2601 fw_size = firmware->size / sizeof(u32);
2602 fw_header = (struct fw_header *) firmware->data;
2603 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2605 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2606 fore200e->name, load_addr, fw_size);
2608 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2609 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2613 for (; fw_size--; fw_data++, load_addr++)
2614 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2616 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2618 #if defined(__sparc_v9__)
2619 /* reported to be required by SBA cards on some sparc64 hosts */
2623 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2624 fore200e_monitor_puts(fore200e, buf);
2626 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2627 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2631 printk(FORE200E "device %s firmware started\n", fore200e->name);
2633 fore200e->state = FORE200E_STATE_START_FW;
2637 release_firmware(firmware);
/*
 * Register the adapter with the Linux ATM stack, cross-link the driver
 * state with the new atm_dev, and record the supported VPI/VCI range and
 * the available OC-3 cell rate.  Returns < 0 when atm_dev_register()
 * fails.
 */
static int __devinit
fore200e_register(struct fore200e* fore200e)
    struct atm_dev* atm_dev;

    DPRINTK(2, "device %s being registered\n", fore200e->name);

    atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
    if (atm_dev == NULL) {
        printk(FORE200E "unable to register device %s\n", fore200e->name);

    /* cross-link the ATM device and the driver's per-board state */
    atm_dev->dev_data = fore200e;
    fore200e->atm_dev = atm_dev;

    atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
    atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;

    /* whole OC-3 rate initially available for CBR reservations */
    fore200e->available_cell_rate = ATM_OC3_PCR;

    fore200e->state = FORE200E_STATE_REGISTER;
/*
 * Bring a detected adapter all the way up: ATM registration, bus-specific
 * configuration and register mapping, cell-processor reset, firmware
 * download/start, cp initialization, host queue setup (command, tx, rx,
 * buffer supply), receive buffer allocation, ESI retrieval and IRQ
 * hookup.  Each step aborts the sequence on failure; on success the board
 * reaches FORE200E_STATE_COMPLETE.
 */
static int __devinit
fore200e_init(struct fore200e* fore200e)
    if (fore200e_register(fore200e) < 0)
    if (fore200e->bus->configure(fore200e) < 0)
    if (fore200e->bus->map(fore200e) < 0)
    if (fore200e_reset(fore200e, 1) < 0)
    if (fore200e_load_and_start_fw(fore200e) < 0)
    if (fore200e_initialize(fore200e) < 0)
    if (fore200e_init_cmd_queue(fore200e) < 0)
    if (fore200e_init_tx_queue(fore200e) < 0)
    if (fore200e_init_rx_queue(fore200e) < 0)
    if (fore200e_init_bs_queue(fore200e) < 0)
    if (fore200e_alloc_rx_buf(fore200e) < 0)
    if (fore200e_get_esi(fore200e) < 0)
    if (fore200e_irq_request(fore200e) < 0)

    /* hand the initial batch of receive buffers to the cp */
    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
/*
 * PCI probe callback for PCA-200E boards: enable the PCI device, allocate
 * and populate the per-adapter state, then run the common initialization
 * sequence and stash the state in the PCI drvdata.
 * (Error-unwind paths are not all visible in this excerpt.)
 */
static int __devinit
fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
    const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
    struct fore200e* fore200e;
    static int index = 0;    /* counts PCA-200E boards probed so far */

    if (pci_enable_device(pci_dev)) {

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL) {

    fore200e->bus       = bus;
    fore200e->bus_dev   = pci_dev;
    fore200e->irq       = pci_dev->irq;
    fore200e->phys_base = pci_resource_start(pci_dev, 0);

    /* NOTE(review): this name is overwritten by the second sprintf below,
     * which uses 'index' rather than 'index - 1'; looks like leftover
     * code — confirm and remove the first call. */
    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    pci_set_master(pci_dev);

    printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
           fore200e->bus->model_name,
           fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

    err = fore200e_init(fore200e);
    /* on failure, release everything fore200e_init() acquired */
    fore200e_shutdown(fore200e);

    pci_set_drvdata(pci_dev, fore200e);

    pci_disable_device(pci_dev);
/*
 * PCI remove callback: tear down the adapter state and disable the PCI
 * device.
 * (NOTE(review): the kfree() of the fore200e structure allocated in
 * fore200e_pca_detect() is not visible in this excerpt — verify it is
 * released on this path.)
 */
static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
    struct fore200e *fore200e;

    fore200e = pci_get_drvdata(pci_dev);

    fore200e_shutdown(fore200e);
    pci_disable_device(pci_dev);
/* PCI IDs handled by this driver: any FORE PCA-200E; driver_data points
 * at the PCA-200E entry of the fore200e_bus[] method table. */
static struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
      0, 0, (unsigned long) &fore200e_bus[0] },
MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
/* PCI driver glue for PCA-200E adapters. */
static struct pci_driver fore200e_pca_driver = {
    .name =     "fore_200e",
    .probe =    fore200e_pca_detect,
    .remove =   __devexit_p(fore200e_pca_remove_one),
    .id_table = fore200e_pca_tbl,
/*
 * Module entry point: announce the driver, scan the buses that provide a
 * detect() hook (SBA-200E), initialize every board found and collect it
 * on fore200e_boards, then register the PCI driver for PCA-200E boards.
 * (Return-value lines are not all visible in this excerpt; the final
 * tests suggest success if PCI registration worked or at least one
 * non-PCI board is usable.)
 */
fore200e_module_init(void)
    const struct fore200e_bus* bus;
    struct fore200e* fore200e;

    printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");

    /* for each configured bus interface */
    for (bus = fore200e_bus; bus->model_name; bus++) {

        /* detect all boards present on that bus */
        for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {

            printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
                   fore200e->bus->model_name,
                   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

            sprintf(fore200e->name, "%s-%d", bus->model_name, index);

            if (fore200e_init(fore200e) < 0) {
                fore200e_shutdown(fore200e);

            /* remember the board for module cleanup */
            list_add(&fore200e->entry, &fore200e_boards);

    if (!pci_register_driver(&fore200e_pca_driver))

    if (!list_empty(&fore200e_boards))
/*
 * Module exit: unregister the PCI driver, then shut down every non-PCI
 * board collected on fore200e_boards during fore200e_module_init().
 * The _safe iterator is required because entries are torn down (and
 * presumably freed) while walking the list.
 */
fore200e_module_cleanup(void)
    struct fore200e *fore200e, *next;

    pci_unregister_driver(&fore200e_pca_driver);

    list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
        fore200e_shutdown(fore200e);

    DPRINTK(1, "module being removed\n");
/*
 * /proc read handler for the ATM device.  Each call produces one "page"
 * of text selected by *pos: identity, free buffer pools, cell-processor
 * heartbeat, firmware/media info, monitor state, PHY / OC-3 / ATM / AAL
 * statistics, auxiliary counters, carrier status, and finally one line
 * per ready VCC.  (Most of the "if (!left--)" position guards and some
 * closing lines are not visible in this excerpt.)
 */
fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
    struct fore200e* fore200e = FORE200E_DEV(dev);
    struct fore200e_vcc* fore200e_vcc;
    struct atm_vcc* vcc;
    int i, len, left = *pos;
    unsigned long flags;

    /* refresh the statistics snapshot from the cp before printing */
    if (fore200e_getstats(fore200e) < 0)

    len = sprintf(page,"\n"
                  " internal name:\t\t%s\n", fore200e->name);

    /* print bus-specific information */
    if (fore200e->bus->proc_read)
        len += fore200e->bus->proc_read(fore200e, page + len);

    /* board identity; the serial number is encoded in the last two ESI bytes */
    len += sprintf(page + len,
                   " interrupt line:\t\t%s\n"
                   " physical base address:\t0x%p\n"
                   " virtual base address:\t0x%p\n"
                   " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
                   " board serial number:\t\t%d\n\n",
                   fore200e_irq_itoa(fore200e->irq),
                   (void*)fore200e->phys_base,
                   fore200e->virt_base,
                   fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
                   fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
                   fore200e->esi[4] * 256 + fore200e->esi[5]);

    /* free buffer counts for both buffer schemes */
    return sprintf(page,
                   " free small bufs, scheme 1:\t%d\n"
                   " free large bufs, scheme 1:\t%d\n"
                   " free small bufs, scheme 2:\t%d\n"
                   " free large bufs, scheme 2:\t%d\n",
                   fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
                   fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
                   fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
                   fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

    /* heartbeat word: a 0xDEADxxxx value signals a firmware fatal error,
       otherwise the raw heartbeat counter is printed */
    u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

    len = sprintf(page,"\n\n"
                  " cell processor:\n"
                  " heartbeat state:\t\t");

    if (hb >> 16 != 0xDEAD)
        len += sprintf(page + len, "0x%08x\n", hb);
        len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

    /* physical media types indexed by the cp's media_type field */
    static const char* media_name[] = {
        "unshielded twisted pair",
        "multimode optical fiber ST",
        "multimode optical fiber SC",
        "single-mode optical fiber ST",
        "single-mode optical fiber SC",

    /* OC-3 loopback mode names indexed by oc3_index below */
    static const char* oc3_mode[] = {
        "diagnostic loopback",

    u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
    u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
    u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
    u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));

    /* NOTE(review): media_index is u32, so the "< 0" arm can never be
       true (unsigned comparison); only the upper-bound test clamps
       out-of-range values */
    if ((media_index < 0) || (media_index > 4))

    /* map the configured loopback mode onto an oc3_mode[] index */
    switch (fore200e->loop_mode) {
        case ATM_LM_NONE:    oc3_index = 0;
        case ATM_LM_LOC_PHY: oc3_index = 1;
        case ATM_LM_RMT_PHY: oc3_index = 2;
        default:             oc3_index = 3;

    /* version words are bit-packed; the shift pairs extract the
       major/minor/patch fields */
    return sprintf(page,
                   " firmware release:\t\t%d.%d.%d\n"
                   " monitor release:\t\t%d.%d\n"
                   " media type:\t\t\t%s\n"
                   " OC-3 revision:\t\t0x%x\n"
                   " OC-3 mode:\t\t\t%s",
                   fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
                   mon960_release >> 16, mon960_release << 16 >> 16,
                   media_name[ media_index ],
                   oc3_mode[ oc3_index ]);

    /* i960 monitor version and boot status word */
    struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

    return sprintf(page,
                   " version number:\t\t%d\n"
                   " boot status word:\t\t0x%08x\n",
                   fore200e->bus->read(&cp_monitor->mon_version),
                   fore200e->bus->read(&cp_monitor->bstat));

    /* PHY-level error counters (cp counters are stored big-endian) */
    return sprintf(page,
                   " device statistics:\n"
                   " crc_header_errors:\t\t%10u\n"
                   " framing_errors:\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->phy.crc_header_errors),
                   be32_to_cpu(fore200e->stats->phy.framing_errors));

    /* SONET/OC-3 section, line and path error counters */
    return sprintf(page, "\n"
                   " section_bip8_errors:\t%10u\n"
                   " path_bip8_errors:\t\t%10u\n"
                   " line_bip24_errors:\t\t%10u\n"
                   " line_febe_errors:\t\t%10u\n"
                   " path_febe_errors:\t\t%10u\n"
                   " corr_hcs_errors:\t\t%10u\n"
                   " ucorr_hcs_errors:\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
                   be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
                   be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
                   be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
                   be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
                   be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
                   be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

    /* ATM-layer cell counters */
    return sprintf(page,"\n"
                   " ATM:\t\t\t\t cells\n"
                   " vpi out of range:\t\t%10u\n"
                   " vpi no conn:\t\t%10u\n"
                   " vci out of range:\t\t%10u\n"
                   " vci no conn:\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->atm.cells_transmitted),
                   be32_to_cpu(fore200e->stats->atm.cells_received),
                   be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
                   be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
                   be32_to_cpu(fore200e->stats->atm.vci_bad_range),
                   be32_to_cpu(fore200e->stats->atm.vci_no_conn));

    /* AAL0 cell counters */
    return sprintf(page,"\n"
                   " AAL0:\t\t\t cells\n"
                   " dropped:\t\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
                   be32_to_cpu(fore200e->stats->aal0.cells_received),
                   be32_to_cpu(fore200e->stats->aal0.cells_dropped));

    /* AAL3/4 SAR and CS sublayer counters */
    return sprintf(page,"\n"
                   " SAR sublayer:\t\t cells\n"
                   " dropped:\t\t\t%10u\n"
                   " CRC errors:\t\t%10u\n"
                   " protocol errors:\t\t%10u\n\n"
                   " CS sublayer:\t\t PDUs\n"
                   " dropped:\t\t\t%10u\n"
                   " protocol errors:\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
                   be32_to_cpu(fore200e->stats->aal34.cells_received),
                   be32_to_cpu(fore200e->stats->aal34.cells_dropped),
                   be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
                   be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
                   be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
                   be32_to_cpu(fore200e->stats->aal34.cspdus_received),
                   be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
                   be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

    /* AAL5 SAR and CS sublayer counters */
    return sprintf(page,"\n"
                   " SAR sublayer:\t\t cells\n"
                   " dropped:\t\t\t%10u\n"
                   " congestions:\t\t%10u\n\n"
                   " CS sublayer:\t\t PDUs\n"
                   " dropped:\t\t\t%10u\n"
                   " CRC errors:\t\t%10u\n"
                   " protocol errors:\t\t%10u\n",
                   be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
                   be32_to_cpu(fore200e->stats->aal5.cells_received),
                   be32_to_cpu(fore200e->stats->aal5.cells_dropped),
                   be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
                   be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
                   be32_to_cpu(fore200e->stats->aal5.cspdus_received),
                   be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
                   be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
                   be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

    /* auxiliary allocation-failure counters (the final %10lu argument is
       not visible in this excerpt) */
    return sprintf(page,"\n"
                   " AUX:\t\t allocation failures\n"
                   " small b1:\t\t\t%10u\n"
                   " large b1:\t\t\t%10u\n"
                   " small b2:\t\t\t%10u\n"
                   " large b2:\t\t\t%10u\n"
                   " RX PDUs:\t\t\t%10u\n"
                   " TX PDUs:\t\t\t%10lu\n",
                   be32_to_cpu(fore200e->stats->aux.small_b1_failed),
                   be32_to_cpu(fore200e->stats->aux.large_b1_failed),
                   be32_to_cpu(fore200e->stats->aux.small_b2_failed),
                   be32_to_cpu(fore200e->stats->aux.large_b2_failed),
                   be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),

    /* receive carrier status as reported by the cp */
    return sprintf(page,"\n"
                   " receive carrier:\t\t\t%s\n",
                   fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");

    /* per-VCC table header */
    return sprintf(page,"\n"
                   " VCCs:\n address VPI VCI AAL "
                   "TX PDUs TX min/max size RX PDUs RX min/max size\n");

    /* one line per ready VCC; the queue lock protects the vc_map scan
       against concurrent open/close */
    for (i = 0; i < NBR_CONNECT; i++) {

        vcc = fore200e->vc_map[i].vcc;

        spin_lock_irqsave(&fore200e->q_lock, flags);

        if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

            fore200e_vcc = FORE200E_VCC(vcc);
            ASSERT(fore200e_vcc);

                    " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
                    (u32)(unsigned long)vcc,
                    vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
                    fore200e_vcc->tx_pdu,
                    fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
                    fore200e_vcc->tx_max_pdu,
                    fore200e_vcc->rx_pdu,
                    fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
                    fore200e_vcc->rx_max_pdu);

            spin_unlock_irqrestore(&fore200e->q_lock, flags);

        spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* module entry and exit points */
module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
/* ATM device operations exported to the Linux ATM stack. */
static const struct atmdev_ops fore200e_ops =
    .open       = fore200e_open,
    .close      = fore200e_close,
    .ioctl      = fore200e_ioctl,
    .getsockopt = fore200e_getsockopt,
    .setsockopt = fore200e_setsockopt,
    .send       = fore200e_send,
    .change_qos = fore200e_change_qos,
    .proc_read  = fore200e_proc_read,
    .owner      = THIS_MODULE
/*
 * Per-bus method tables, one entry per supported bus flavor (PCI
 * PCA-200E, SBus SBA-200E).  The numeric fields after the names are
 * presumably the descriptor/buffer/status alignment constraints — confirm
 * against the fore200e_bus declaration in fore200e.h.  Several function
 * slots (read/write accessors, detect/map/reset for the PCA entry,
 * irq_enable, ...) are not visible in this excerpt.
 */
static const struct fore200e_bus fore200e_bus[] = {

    /* PCA-200E (PCI) operations */
    { "PCA-200E", "pca200e", 32, 4, 32,
      fore200e_pca_dma_map,
      fore200e_pca_dma_unmap,
      fore200e_pca_dma_sync_for_cpu,
      fore200e_pca_dma_sync_for_device,
      fore200e_pca_dma_chunk_alloc,
      fore200e_pca_dma_chunk_free,
      fore200e_pca_configure,
      fore200e_pca_prom_read,
      fore200e_pca_irq_check,
      fore200e_pca_irq_ack,
      fore200e_pca_proc_read,

    /* SBA-200E (SBus) operations */
    { "SBA-200E", "sba200e", 32, 64, 32,
      fore200e_sba_dma_map,
      fore200e_sba_dma_unmap,
      fore200e_sba_dma_sync_for_cpu,
      fore200e_sba_dma_sync_for_device,
      fore200e_sba_dma_chunk_alloc,
      fore200e_sba_dma_chunk_free,
      fore200e_sba_detect,
      fore200e_sba_configure,
      fore200e_sba_prom_read,
      fore200e_sba_irq_enable,
      fore200e_sba_irq_check,
      fore200e_sba_irq_ack,
      fore200e_sba_proc_read,
3217 MODULE_LICENSE("GPL");
3219 #ifdef __LITTLE_ENDIAN__
3220 MODULE_FIRMWARE("pca200e.bin");
3222 MODULE_FIRMWARE("pca200e_ecd.bin2");
3224 #endif /* CONFIG_PCI */
3226 MODULE_FIRMWARE("sba200e_ecd.bin2");