drivers/net: Remove pointless checks for NULL prior to calling kfree()
[safe/jmp/linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 #define DRV_VERSION "Version 2.0.9.1"
69
70 /* S2io Driver name & version. */
71 static char s2io_driver_name[] = "Neterion";
72 static char s2io_driver_version[] = DRV_VERSION;
73
74 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
75 {
76         int ret;
77
78         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
79                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
80
81         return ret;
82 }
83
84 /*
85  * Cards with following subsystem_id have a link state indication
86  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
87  * macro below identifies these cards given the subsystem_id.
88  */
/* NOTE(review): neither the arguments nor the whole expansion are
 * parenthesized; wrap `dev_type`, `subid` and the full ternary in
 * parentheses before using this macro inside larger expressions.
 */
89 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
90         (dev_type == XFRAME_I_DEVICE) ?                 \
91                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
92                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
93
/* Link is up when neither remote nor local RMAC fault is reported. */
94 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
95                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* NOTE(review): expands to a use of a local variable `sp` that must be
 * in scope at the call site -- an implicit dependency worth documenting
 * at each use.
 */
96 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer fill levels returned by rx_buffer_level() below. */
97 #define PANIC   1
98 #define LOW     2
99 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
100 {
101         int level = 0;
102         mac_info_t *mac_control;
103
104         mac_control = &sp->mac_control;
105         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
106                 level = LOW;
107                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
108                         level = PANIC;
109                 }
110         }
111
112         return level;
113 }
114
115 /* Ethtool related variables and Macros. */
/* Labels for the self-tests exposed through ethtool; the (offline) /
 * (online) annotations are part of the user-visible string.
 */
116 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
117         "Register test\t(offline)",
118         "Eeprom test\t(offline)",
119         "Link test\t(online)",
120         "RLDRAM test\t(offline)",
121         "BIST Test\t(offline)"
122 };
123
/* Row labels for `ethtool -S`; tmac_*/rmac_* presumably mirror the
 * adapter's Tx/Rx MAC hardware counters plus two driver-maintained ECC
 * error counts -- confirm ordering against the stats block layout in
 * s2io.h (StatInfo_t).
 */
124 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
125         {"tmac_frms"},
126         {"tmac_data_octets"},
127         {"tmac_drop_frms"},
128         {"tmac_mcst_frms"},
129         {"tmac_bcst_frms"},
130         {"tmac_pause_ctrl_frms"},
131         {"tmac_any_err_frms"},
132         {"tmac_vld_ip_octets"},
133         {"tmac_vld_ip"},
134         {"tmac_drop_ip"},
135         {"tmac_icmp"},
136         {"tmac_rst_tcp"},
137         {"tmac_tcp"},
138         {"tmac_udp"},
139         {"rmac_vld_frms"},
140         {"rmac_data_octets"},
141         {"rmac_fcs_err_frms"},
142         {"rmac_drop_frms"},
143         {"rmac_vld_mcst_frms"},
144         {"rmac_vld_bcst_frms"},
145         {"rmac_in_rng_len_err_frms"},
146         {"rmac_long_frms"},
147         {"rmac_pause_ctrl_frms"},
148         {"rmac_discarded_frms"},
149         {"rmac_usized_frms"},
150         {"rmac_osized_frms"},
151         {"rmac_frag_frms"},
152         {"rmac_jabber_frms"},
153         {"rmac_ip"},
154         {"rmac_ip_octets"},
155         {"rmac_hdr_err_ip"},
156         {"rmac_drop_ip"},
157         {"rmac_icmp"},
158         {"rmac_tcp"},
159         {"rmac_udp"},
160         {"rmac_err_drp_udp"},
161         {"rmac_pause_cnt"},
162         {"rmac_accepted_ip"},
163         {"rmac_err_tcp"},
164         {"\n DRIVER STATISTICS"},
165         {"single_bit_ecc_errs"},
166         {"double_bit_ecc_errs"},
167 };
168
/* NOTE(review): these expansions are unparenthesized; a use such as
 * `x / S2IO_STAT_STRINGS_LEN` would mis-associate.  Parenthesize if
 * they are ever used inside larger expressions.
 */
169 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
170 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
171
172 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
173 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
174
/* Initialize and arm a timer in one step.
 * NOTE(review): multi-statement macro without do { } while (0) -- it
 * would break inside an unbraced if/else; use only as a full statement.
 */
175 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
176                         init_timer(&timer);                     \
177                         timer.function = handle;                \
178                         timer.data = (unsigned long) arg;       \
179                         mod_timer(&timer, (jiffies + exp))      \
180
180
181 /* Add the vlan */
182 static void s2io_vlan_rx_register(struct net_device *dev,
183                                         struct vlan_group *grp)
184 {
185         nic_t *nic = dev->priv;
186         unsigned long flags;
187
188         spin_lock_irqsave(&nic->tx_lock, flags);
189         nic->vlgrp = grp;
190         spin_unlock_irqrestore(&nic->tx_lock, flags);
191 }
192
193 /* Unregister the vlan */
194 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
195 {
196         nic_t *nic = dev->priv;
197         unsigned long flags;
198
199         spin_lock_irqsave(&nic->tx_lock, flags);
200         if (nic->vlgrp)
201                 nic->vlgrp->vlan_devices[vid] = NULL;
202         spin_unlock_irqrestore(&nic->tx_lock, flags);
203 }
204
205 /*
206  * Constants to be programmed into the Xena's registers, to configure
207  * the XAUI.
208  */
209
/* Sentinels used inside the u64 command arrays below. */
210 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
211 #define END_SIGN        0x0
212
/* DTX configuration sequence for the Herc (Xframe-II) adapter:
 * alternating "set address" / "write data" command pairs, terminated
 * by END_SIGN.  Presumably consumed by the XAUI initialization loop --
 * confirm against the register-programming code.
 */
213 static u64 herc_act_dtx_cfg[] = {
214         /* Set address */
215         0x8000051536750000ULL, 0x80000515367500E0ULL,
216         /* Write data */
217         0x8000051536750004ULL, 0x80000515367500E4ULL,
218         /* Set address */
219         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
220         /* Write data */
221         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
222         /* Set address */
223         0x801205150D440000ULL, 0x801205150D4400E0ULL,
224         /* Write data */
225         0x801205150D440004ULL, 0x801205150D4400E4ULL,
226         /* Set address */
227         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
228         /* Write data */
229         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
230         /* Done */
231         END_SIGN
232 };
233
/* MDIO sequence for Xena: pulse the PMA PLL through reset, then
 * release it.  END_SIGN-terminated like the other command arrays.
 */
234 static u64 xena_mdio_cfg[] = {
235         /* Reset PMA PLL */
236         0xC001010000000000ULL, 0xC0010100000000E0ULL,
237         0xC0010100008000E4ULL,
238         /* Remove Reset from PMA PLL */
239         0xC001010000000000ULL, 0xC0010100000000E0ULL,
240         0xC0010100000000E4ULL,
241         END_SIGN
242 };
243
/* DTX sequence for Xena (Xframe-I).  SWITCH_SIGN separates the
 * set-PADLOOPBACKN phase from the remove-PADLOOPBACKN phase;
 * END_SIGN terminates the whole sequence.
 */
244 static u64 xena_dtx_cfg[] = {
245         0x8000051500000000ULL, 0x80000515000000E0ULL,
246         0x80000515D93500E4ULL, 0x8001051500000000ULL,
247         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
248         0x8002051500000000ULL, 0x80020515000000E0ULL,
249         0x80020515F21000E4ULL,
250         /* Set PADLOOPBACKN */
251         0x8002051500000000ULL, 0x80020515000000E0ULL,
252         0x80020515B20000E4ULL, 0x8003051500000000ULL,
253         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
254         0x8004051500000000ULL, 0x80040515000000E0ULL,
255         0x80040515B20000E4ULL, 0x8005051500000000ULL,
256         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
257         SWITCH_SIGN,
258         /* Remove PADLOOPBACKN */
259         0x8002051500000000ULL, 0x80020515000000E0ULL,
260         0x80020515F20000E4ULL, 0x8003051500000000ULL,
261         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
262         0x8004051500000000ULL, 0x80040515000000E0ULL,
263         0x80040515F20000E4ULL, 0x8005051500000000ULL,
264         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
265         END_SIGN
266 };
267
268 /*
269  * Constants for Fixing the MacAddress problem seen mostly on
270  * Alpha machines.
271  */
/* END_SIGN-terminated write sequence, like the arrays above. */
272 static u64 fix_mac[] = {
273         0x0060000000000000ULL, 0x0060600000000000ULL,
274         0x0040600000000000ULL, 0x0000600000000000ULL,
275         0x0020600000000000ULL, 0x0060600000000000ULL,
276         0x0020600000000000ULL, 0x0060600000000000ULL,
277         0x0020600000000000ULL, 0x0060600000000000ULL,
278         0x0020600000000000ULL, 0x0060600000000000ULL,
279         0x0020600000000000ULL, 0x0060600000000000ULL,
280         0x0020600000000000ULL, 0x0060600000000000ULL,
281         0x0020600000000000ULL, 0x0060600000000000ULL,
282         0x0020600000000000ULL, 0x0060600000000000ULL,
283         0x0020600000000000ULL, 0x0060600000000000ULL,
284         0x0020600000000000ULL, 0x0060600000000000ULL,
285         0x0020600000000000ULL, 0x0000600000000000ULL,
286         0x0040600000000000ULL, 0x0060600000000000ULL,
287         END_SIGN
288 };
289
290 /* Module Loadable parameters. */
/* Per-FIFO Tx lengths and per-ring Rx sizes; a value of 0 presumably
 * means "use the driver default" -- confirm where the defaults are
 * applied during device init.
 */
291 static unsigned int tx_fifo_num = 1;
292 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
293     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
294 static unsigned int rx_ring_num = 1;
295 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
296     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297 static unsigned int rts_frm_len[MAX_RX_RINGS] =
298     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
299 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame and MC pause-threshold tuning; units are hardware
 * register units not visible from this file -- see s2io.h.
 */
300 static unsigned int rmac_pause_time = 65535;
301 static unsigned int mc_pause_threshold_q0q3 = 187;
302 static unsigned int mc_pause_threshold_q4q7 = 187;
303 static unsigned int shared_splits;
304 static unsigned int tmac_util_period = 5;
305 static unsigned int rmac_util_period = 5;
306 static unsigned int bimodal = 0;
307 #ifndef CONFIG_S2IO_NAPI
308 static unsigned int indicate_max_pkts;
309 #endif
310 /* Frequency of Rx desc syncs expressed as power of 2 */
311 static unsigned int rxsync_frequency = 3;
312 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
313 static unsigned int intr_type = 0;
314
315 /*
316  * S2IO device table.
317  * This table lists all the devices that this driver supports.
318  */
/* NOTE(review): this table is tagged __devinitdata but is referenced
 * at runtime via s2io_driver.id_table for hotplug matching; verify the
 * section is not discarded after init (later kernels make ID tables
 * const without __devinitdata).  The {0,} entry terminates the table.
 */
319 static struct pci_device_id s2io_tbl[] __devinitdata = {
320         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
321          PCI_ANY_ID, PCI_ANY_ID},
322         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
323          PCI_ANY_ID, PCI_ANY_ID},
324         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
325          PCI_ANY_ID, PCI_ANY_ID},
326         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
327          PCI_ANY_ID, PCI_ANY_ID},
328         {0,}
329 };
330
/* Export the ID table so hotplug/modprobe can match devices to us. */
331 MODULE_DEVICE_TABLE(pci, s2io_tbl);
332
/* PCI driver glue: probe/remove are defined later in this file. */
333 static struct pci_driver s2io_driver = {
334       .name = "S2IO",
335       .id_table = s2io_tbl,
336       .probe = s2io_init_nic,
337       .remove = __devexit_p(s2io_rem_nic),
338 };
339
340 /* A simplifier macro used both by init and free shared_mem Fns(). */
341 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
342
343 /**
344  * init_shared_mem - Allocation and Initialization of Memory
345  * @nic: Device private variable.
346  * Description: The function allocates all the memory areas shared
347  * between the NIC and the driver. This includes Tx descriptors,
348  * Rx descriptors and the statistics block.
349  */
350
351 static int init_shared_mem(struct s2io_nic *nic)
352 {
353         u32 size;
354         void *tmp_v_addr, *tmp_v_addr_next;
355         dma_addr_t tmp_p_addr, tmp_p_addr_next;
356         RxD_block_t *pre_rxd_blk = NULL;
357         int i, j, blk_cnt, rx_sz, tx_sz;
358         int lst_size, lst_per_page;
359         struct net_device *dev = nic->dev;
360 #ifdef CONFIG_2BUFF_MODE
361         unsigned long tmp;
362         buffAdd_t *ba;
363 #endif
364
365         mac_info_t *mac_control;
366         struct config_param *config;
367
368         mac_control = &nic->mac_control;
369         config = &nic->config;
370
371
372         /* Allocation and initialization of TXDLs in FIFOs */
373         size = 0;
374         for (i = 0; i < config->tx_fifo_num; i++) {
375                 size += config->tx_cfg[i].fifo_len;
376         }
            /* NOTE(review): the message below hard-codes 8192; keep it
             * in sync with MAX_AVAILABLE_TXDS if that ever changes.
             */
377         if (size > MAX_AVAILABLE_TXDS) {
378                 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
379                           __FUNCTION__);
380                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
381                 return FAILURE;
382         }
383
            /* lst_size = bytes per TxDL (one list of max_txds TxDs);
             * lst_per_page = how many whole TxDLs fit in one page.
             */
384         lst_size = (sizeof(TxD_t) * config->max_txds);
385         tx_sz = lst_size * size;
386         lst_per_page = PAGE_SIZE / lst_size;
387
            /* Per-FIFO bookkeeping: list_info[l] will hold the virtual
             * and DMA addresses of TxDL number l (filled in below).
             */
388         for (i = 0; i < config->tx_fifo_num; i++) {
389                 int fifo_len = config->tx_cfg[i].fifo_len;
390                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
391                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
392                                                           GFP_KERNEL);
393                 if (!mac_control->fifos[i].list_info) {
394                         DBG_PRINT(ERR_DBG,
395                                   "Malloc failed for list_info\n");
396                         return -ENOMEM;
397                 }
398                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
399         }
400         for (i = 0; i < config->tx_fifo_num; i++) {
401                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
402                                                 lst_per_page);
403                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
404                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
405                     config->tx_cfg[i].fifo_len - 1;
406                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
407                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
408                     config->tx_cfg[i].fifo_len - 1;
409                 mac_control->fifos[i].fifo_no = i;
410                 mac_control->fifos[i].nic = nic;
411                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
412
413                 for (j = 0; j < page_num; j++) {
414                         int k = 0;
415                         dma_addr_t tmp_p;
416                         void *tmp_v;
417                         tmp_v = pci_alloc_consistent(nic->pdev,
418                                                      PAGE_SIZE, &tmp_p);
419                         if (!tmp_v) {
420                                 DBG_PRINT(ERR_DBG,
421                                           "pci_alloc_consistent ");
422                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
423                                 return -ENOMEM;
424                         }
425                         /* If we got a zero DMA address(can happen on
426                          * certain platforms like PPC), reallocate.
427                          * Store virtual address of page we don't want,
428                          * to be freed later.
429                          */
430                         if (!tmp_p) {
431                                 mac_control->zerodma_virt_addr = tmp_v;
432                                 DBG_PRINT(INIT_DBG, 
433                                 "%s: Zero DMA address for TxDL. ", dev->name);
434                                 DBG_PRINT(INIT_DBG, 
435                                 "Virtual address %p\n", tmp_v);
436                                 tmp_v = pci_alloc_consistent(nic->pdev,
437                                                      PAGE_SIZE, &tmp_p);
438                                 if (!tmp_v) {
439                                         DBG_PRINT(ERR_DBG,
440                                           "pci_alloc_consistent ");
441                                         DBG_PRINT(ERR_DBG, "failed for TxDL\n");
442                                         return -ENOMEM;
443                                 }
444                         }
                            /* Carve the page into lst_per_page TxDLs and
                             * record each one's virtual/DMA address.
                             */
445                         while (k < lst_per_page) {
446                                 int l = (j * lst_per_page) + k;
447                                 if (l == config->tx_cfg[i].fifo_len)
448                                         break;
449                                 mac_control->fifos[i].list_info[l].list_virt_addr =
450                                     tmp_v + (k * lst_size);
451                                 mac_control->fifos[i].list_info[l].list_phy_addr =
452                                     tmp_p + (k * lst_size);
453                                 k++;
454                         }
455                 }
456         }
457
458         /* Allocation and initialization of RXDs in Rings */
459         size = 0;
460         for (i = 0; i < config->rx_ring_num; i++) {
461                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
462                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
463                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
464                                   i);
465                         DBG_PRINT(ERR_DBG, "RxDs per Block");
466                         return FAILURE;
467                 }
468                 size += config->rx_cfg[i].num_rxd;
469                 mac_control->rings[i].block_count =
470                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
                    /* pkt_cnt = num_rxd - block_count: presumably one RxD
                     * per block is reserved for the block-link marker set
                     * up in the interlinking loop below.
                     */
471                 mac_control->rings[i].pkt_cnt =
472                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
473         }
474         size = (size * (sizeof(RxD_t)));
475         rx_sz = size;
476
477         for (i = 0; i < config->rx_ring_num; i++) {
478                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
479                 mac_control->rings[i].rx_curr_get_info.offset = 0;
480                 mac_control->rings[i].rx_curr_get_info.ring_len =
481                     config->rx_cfg[i].num_rxd - 1;
482                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
483                 mac_control->rings[i].rx_curr_put_info.offset = 0;
484                 mac_control->rings[i].rx_curr_put_info.ring_len =
485                     config->rx_cfg[i].num_rxd - 1;
486                 mac_control->rings[i].nic = nic;
487                 mac_control->rings[i].ring_no = i;
488
489                 blk_cnt =
490                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
491                 /*  Allocating all the Rx blocks */
492                 for (j = 0; j < blk_cnt; j++) {
493 #ifndef CONFIG_2BUFF_MODE
494                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
495 #else
496                         size = SIZE_OF_BLOCK;
497 #endif
498                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
499                                                           &tmp_p_addr);
500                         if (tmp_v_addr == NULL) {
501                                 /*
502                                  * In case of failure, free_shared_mem()
503                                  * is called, which should free any
504                                  * memory that was alloced till the
505                                  * failure happened.
506                                  */
507                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
508                                     tmp_v_addr;
509                                 return -ENOMEM;
510                         }
511                         memset(tmp_v_addr, 0, size);
512                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
513                                 tmp_v_addr;
514                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
515                                 tmp_p_addr;
516                 }
                /* Interlinking all Rx Blocks: each block's link fields
                 * point (virtually and physically) at the next block,
                 * wrapping from the last block back to the first.
                 */
518                 for (j = 0; j < blk_cnt; j++) {
519                         tmp_v_addr =
520                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
521                         tmp_v_addr_next =
522                                 mac_control->rings[i].rx_blocks[(j + 1) %
523                                               blk_cnt].block_virt_addr;
524                         tmp_p_addr =
525                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
526                         tmp_p_addr_next =
527                                 mac_control->rings[i].rx_blocks[(j + 1) %
528                                               blk_cnt].block_dma_addr;
529
530                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
531                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
532                                                                  * marker.
533                                                                  */
534 #ifndef CONFIG_2BUFF_MODE
535                         pre_rxd_blk->reserved_2_pNext_RxD_block =
536                             (unsigned long) tmp_v_addr_next;
537 #endif
538                         pre_rxd_blk->pNext_RxD_Blk_physical =
539                             (u64) tmp_p_addr_next;
540                 }
541         }
542
543 #ifdef CONFIG_2BUFF_MODE
544         /*
545          * Allocation of Storages for buffer addresses in 2BUFF mode
546          * and the buffers as well.
547          */
548         for (i = 0; i < config->rx_ring_num; i++) {
549                 blk_cnt =
550                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
551                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
552                                      GFP_KERNEL);
553                 if (!mac_control->rings[i].ba)
554                         return -ENOMEM;
555                 for (j = 0; j < blk_cnt; j++) {
556                         int k = 0;
557                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
558                                                  (MAX_RXDS_PER_BLOCK + 1)),
559                                                 GFP_KERNEL);
560                         if (!mac_control->rings[i].ba[j])
561                                 return -ENOMEM;
562                         while (k != MAX_RXDS_PER_BLOCK) {
563                                 ba = &mac_control->rings[i].ba[j][k];

                                /* Over-allocate by ALIGN_SIZE, then round
                                 * the pointer up: advance-and-mask aligns
                                 * ba_0/ba_1 assuming ALIGN_SIZE is a
                                 * 2^n - 1 mask -- confirm in s2io.h.
                                 */
565                                 ba->ba_0_org = (void *) kmalloc
566                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
567                                 if (!ba->ba_0_org)
568                                         return -ENOMEM;
569                                 tmp = (unsigned long) ba->ba_0_org;
570                                 tmp += ALIGN_SIZE;
571                                 tmp &= ~((unsigned long) ALIGN_SIZE);
572                                 ba->ba_0 = (void *) tmp;
573
574                                 ba->ba_1_org = (void *) kmalloc
575                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
576                                 if (!ba->ba_1_org)
577                                         return -ENOMEM;
578                                 tmp = (unsigned long) ba->ba_1_org;
579                                 tmp += ALIGN_SIZE;
580                                 tmp &= ~((unsigned long) ALIGN_SIZE);
581                                 ba->ba_1 = (void *) tmp;
582                                 k++;
583                         }
584                 }
585         }
586 #endif
587
588         /* Allocation and initialization of Statistics block */
589         size = sizeof(StatInfo_t);
590         mac_control->stats_mem = pci_alloc_consistent
591             (nic->pdev, size, &mac_control->stats_mem_phy);
592
593         if (!mac_control->stats_mem) {
594                 /*
595                  * In case of failure, free_shared_mem() is called, which
596                  * should free any memory that was alloced till the
597                  * failure happened.
598                  */
599                 return -ENOMEM;
600         }
601         mac_control->stats_mem_sz = size;
602
603         tmp_v_addr = mac_control->stats_mem;
604         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
605         memset(tmp_v_addr, 0, size);
            /* NOTE(review): tmp_p_addr here still holds the DMA address
             * of the LAST Rx block touched above, not the stats block
             * (stats_mem_phy) -- confirm this is the intended value for
             * the "Ring Mem PHY" message.
             */
606         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
607                   (unsigned long long) tmp_p_addr);
608
609         return SUCCESS;
610 }
611
612 /**
613  * free_shared_mem - Free the allocated Memory
614  * @nic:  Device private variable.
615  * Description: This function is to free all memory locations allocated by
616  * the init_shared_mem() function and return it to the kernel.
617  */
618
619 static void free_shared_mem(struct s2io_nic *nic)
620 {
621         int i, j, blk_cnt, size;
622         void *tmp_v_addr;
623         dma_addr_t tmp_p_addr;
624         mac_info_t *mac_control;
625         struct config_param *config;
626         int lst_size, lst_per_page;
627         struct net_device *dev = nic->dev;
628
629         if (!nic)
630                 return;
631
632         mac_control = &nic->mac_control;
633         config = &nic->config;
634
635         lst_size = (sizeof(TxD_t) * config->max_txds);
636         lst_per_page = PAGE_SIZE / lst_size;
637
638         for (i = 0; i < config->tx_fifo_num; i++) {
639                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
640                                                 lst_per_page);
641                 for (j = 0; j < page_num; j++) {
642                         int mem_blks = (j * lst_per_page);
643                         if (!mac_control->fifos[i].list_info)
644                                 return; 
645                         if (!mac_control->fifos[i].list_info[mem_blks].
646                                  list_virt_addr)
647                                 break;
648                         pci_free_consistent(nic->pdev, PAGE_SIZE,
649                                             mac_control->fifos[i].
650                                             list_info[mem_blks].
651                                             list_virt_addr,
652                                             mac_control->fifos[i].
653                                             list_info[mem_blks].
654                                             list_phy_addr);
655                 }
656                 /* If we got a zero DMA address during allocation,
657                  * free the page now
658                  */
659                 if (mac_control->zerodma_virt_addr) {
660                         pci_free_consistent(nic->pdev, PAGE_SIZE,
661                                             mac_control->zerodma_virt_addr,
662                                             (dma_addr_t)0);
663                         DBG_PRINT(INIT_DBG, 
664                                 "%s: Freeing TxDL with zero DMA addr. ",
665                                 dev->name);
666                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
667                                 mac_control->zerodma_virt_addr);
668                 }
669                 kfree(mac_control->fifos[i].list_info);
670         }
671
672 #ifndef CONFIG_2BUFF_MODE
673         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
674 #else
675         size = SIZE_OF_BLOCK;
676 #endif
677         for (i = 0; i < config->rx_ring_num; i++) {
678                 blk_cnt = mac_control->rings[i].block_count;
679                 for (j = 0; j < blk_cnt; j++) {
680                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
681                                 block_virt_addr;
682                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
683                                 block_dma_addr;
684                         if (tmp_v_addr == NULL)
685                                 break;
686                         pci_free_consistent(nic->pdev, size,
687                                             tmp_v_addr, tmp_p_addr);
688                 }
689         }
690
691 #ifdef CONFIG_2BUFF_MODE
692         /* Freeing buffer storage addresses in 2BUFF mode. */
693         for (i = 0; i < config->rx_ring_num; i++) {
694                 blk_cnt =
695                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
696                 for (j = 0; j < blk_cnt; j++) {
697                         int k = 0;
698                         if (!mac_control->rings[i].ba[j])
699                                 continue;
700                         while (k != MAX_RXDS_PER_BLOCK) {
701                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
702                                 kfree(ba->ba_0_org);
703                                 kfree(ba->ba_1_org);
704                                 k++;
705                         }
706                         kfree(mac_control->rings[i].ba[j]);
707                 }
708                 kfree(mac_control->rings[i].ba);
709         }
710 #endif
711
712         if (mac_control->stats_mem) {
713                 pci_free_consistent(nic->pdev,
714                                     mac_control->stats_mem_sz,
715                                     mac_control->stats_mem,
716                                     mac_control->stats_mem_phy);
717         }
718 }
719
720 /**
721  * s2io_verify_pci_mode -
722  */
723
724 static int s2io_verify_pci_mode(nic_t *nic)
725 {
726         XENA_dev_config_t __iomem *bar0 = nic->bar0;
727         register u64 val64 = 0;
728         int     mode;
729
730         val64 = readq(&bar0->pci_mode);
731         mode = (u8)GET_PCI_MODE(val64);
732
733         if ( val64 & PCI_MODE_UNKNOWN_MODE)
734                 return -1;      /* Unknown PCI mode */
735         return mode;
736 }
737
738
739 /**
740  * s2io_print_pci_mode -
741  */
742 static int s2io_print_pci_mode(nic_t *nic)
743 {
744         XENA_dev_config_t __iomem *bar0 = nic->bar0;
745         register u64 val64 = 0;
746         int     mode;
747         struct config_param *config = &nic->config;
748
749         val64 = readq(&bar0->pci_mode);
750         mode = (u8)GET_PCI_MODE(val64);
751
752         if ( val64 & PCI_MODE_UNKNOWN_MODE)
753                 return -1;      /* Unknown PCI mode */
754
755         if (val64 & PCI_MODE_32_BITS) {
756                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
757         } else {
758                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
759         }
760
761         switch(mode) {
762                 case PCI_MODE_PCI_33:
763                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
764                         config->bus_speed = 33;
765                         break;
766                 case PCI_MODE_PCI_66:
767                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
768                         config->bus_speed = 133;
769                         break;
770                 case PCI_MODE_PCIX_M1_66:
771                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
772                         config->bus_speed = 133; /* Herc doubles the clock rate */
773                         break;
774                 case PCI_MODE_PCIX_M1_100:
775                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
776                         config->bus_speed = 200;
777                         break;
778                 case PCI_MODE_PCIX_M1_133:
779                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
780                         config->bus_speed = 266;
781                         break;
782                 case PCI_MODE_PCIX_M2_66:
783                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
784                         config->bus_speed = 133;
785                         break;
786                 case PCI_MODE_PCIX_M2_100:
787                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
788                         config->bus_speed = 200;
789                         break;
790                 case PCI_MODE_PCIX_M2_133:
791                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
792                         config->bus_speed = 266;
793                         break;
794                 default:
795                         return -1;      /* Unsupported bus speed */
796         }
797
798         return mode;
799 }
800
801 /**
802  *  init_nic - Initialization of hardware
 *  @nic: device private variable
804  *  Description: The function sequentially configures every block
805  *  of the H/W from their reset values.
806  *  Return Value:  SUCCESS on success and
807  *  '-1' on failure (endian settings incorrect).
808  */
809
810 static int init_nic(struct s2io_nic *nic)
811 {
812         XENA_dev_config_t __iomem *bar0 = nic->bar0;
813         struct net_device *dev = nic->dev;
814         register u64 val64 = 0;
815         void __iomem *add;
816         u32 time;
817         int i, j;
818         mac_info_t *mac_control;
819         struct config_param *config;
820         int mdio_cnt = 0, dtx_cnt = 0;
821         unsigned long long mem_share;
822         int mem_size;
823
824         mac_control = &nic->mac_control;
825         config = &nic->config;
826
827         /* to set the swapper controle on the card */
828         if(s2io_set_swapper(nic)) {
829                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
830                 return -1;
831         }
832
833         /*
834          * Herc requires EOI to be removed from reset before XGXS, so..
835          */
836         if (nic->device_type & XFRAME_II_DEVICE) {
837                 val64 = 0xA500000000ULL;
838                 writeq(val64, &bar0->sw_reset);
839                 msleep(500);
840                 val64 = readq(&bar0->sw_reset);
841         }
842
843         /* Remove XGXS from reset state */
844         val64 = 0;
845         writeq(val64, &bar0->sw_reset);
846         msleep(500);
847         val64 = readq(&bar0->sw_reset);
848
849         /*  Enable Receiving broadcasts */
850         add = &bar0->mac_cfg;
851         val64 = readq(&bar0->mac_cfg);
852         val64 |= MAC_RMAC_BCAST_ENABLE;
853         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
854         writel((u32) val64, add);
855         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
856         writel((u32) (val64 >> 32), (add + 4));
857
858         /* Read registers in all blocks */
859         val64 = readq(&bar0->mac_int_mask);
860         val64 = readq(&bar0->mc_int_mask);
861         val64 = readq(&bar0->xgxs_int_mask);
862
863         /*  Set MTU */
864         val64 = dev->mtu;
865         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
866
867         /*
868          * Configuring the XAUI Interface of Xena.
869          * ***************************************
870          * To Configure the Xena's XAUI, one has to write a series
871          * of 64 bit values into two registers in a particular
872          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
873          * which will be defined in the array of configuration values
874          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
875          * to switch writing from one regsiter to another. We continue
876          * writing these values until we encounter the 'END_SIGN' macro.
877          * For example, After making a series of 21 writes into
878          * dtx_control register the 'SWITCH_SIGN' appears and hence we
879          * start writing into mdio_control until we encounter END_SIGN.
880          */
881         if (nic->device_type & XFRAME_II_DEVICE) {
882                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
883                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
884                                           &bar0->dtx_control, UF);
885                         if (dtx_cnt & 0x1)
886                                 msleep(1); /* Necessary!! */
887                         dtx_cnt++;
888                 }
889         } else {
890                 while (1) {
891                       dtx_cfg:
892                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
893                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
894                                         dtx_cnt++;
895                                         goto mdio_cfg;
896                                 }
897                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
898                                                   &bar0->dtx_control, UF);
899                                 val64 = readq(&bar0->dtx_control);
900                                 dtx_cnt++;
901                         }
902                       mdio_cfg:
903                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
904                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
905                                         mdio_cnt++;
906                                         goto dtx_cfg;
907                                 }
908                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
909                                                   &bar0->mdio_control, UF);
910                                 val64 = readq(&bar0->mdio_control);
911                                 mdio_cnt++;
912                         }
913                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
914                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
915                                 break;
916                         } else {
917                                 goto dtx_cfg;
918                         }
919                 }
920         }
921
922         /*  Tx DMA Initialization */
923         val64 = 0;
924         writeq(val64, &bar0->tx_fifo_partition_0);
925         writeq(val64, &bar0->tx_fifo_partition_1);
926         writeq(val64, &bar0->tx_fifo_partition_2);
927         writeq(val64, &bar0->tx_fifo_partition_3);
928
929
930         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
931                 val64 |=
932                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
933                          13) | vBIT(config->tx_cfg[i].fifo_priority,
934                                     ((i * 32) + 5), 3);
935
936                 if (i == (config->tx_fifo_num - 1)) {
937                         if (i % 2 == 0)
938                                 i++;
939                 }
940
941                 switch (i) {
942                 case 1:
943                         writeq(val64, &bar0->tx_fifo_partition_0);
944                         val64 = 0;
945                         break;
946                 case 3:
947                         writeq(val64, &bar0->tx_fifo_partition_1);
948                         val64 = 0;
949                         break;
950                 case 5:
951                         writeq(val64, &bar0->tx_fifo_partition_2);
952                         val64 = 0;
953                         break;
954                 case 7:
955                         writeq(val64, &bar0->tx_fifo_partition_3);
956                         break;
957                 }
958         }
959
960         /* Enable Tx FIFO partition 0. */
961         val64 = readq(&bar0->tx_fifo_partition_0);
962         val64 |= BIT(0);        /* To enable the FIFO partition. */
963         writeq(val64, &bar0->tx_fifo_partition_0);
964
965         /*
966          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
967          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
968          */
969         if ((nic->device_type == XFRAME_I_DEVICE) &&
970                 (get_xena_rev_id(nic->pdev) < 4))
971                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
972
973         val64 = readq(&bar0->tx_fifo_partition_0);
974         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
975                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
976
977         /*
978          * Initialization of Tx_PA_CONFIG register to ignore packet
979          * integrity checking.
980          */
981         val64 = readq(&bar0->tx_pa_cfg);
982         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
983             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
984         writeq(val64, &bar0->tx_pa_cfg);
985
986         /* Rx DMA intialization. */
987         val64 = 0;
988         for (i = 0; i < config->rx_ring_num; i++) {
989                 val64 |=
990                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
991                          3);
992         }
993         writeq(val64, &bar0->rx_queue_priority);
994
995         /*
996          * Allocating equal share of memory to all the
997          * configured Rings.
998          */
999         val64 = 0;
1000         if (nic->device_type & XFRAME_II_DEVICE)
1001                 mem_size = 32;
1002         else
1003                 mem_size = 64;
1004
1005         for (i = 0; i < config->rx_ring_num; i++) {
1006                 switch (i) {
1007                 case 0:
1008                         mem_share = (mem_size / config->rx_ring_num +
1009                                      mem_size % config->rx_ring_num);
1010                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1011                         continue;
1012                 case 1:
1013                         mem_share = (mem_size / config->rx_ring_num);
1014                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1015                         continue;
1016                 case 2:
1017                         mem_share = (mem_size / config->rx_ring_num);
1018                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1019                         continue;
1020                 case 3:
1021                         mem_share = (mem_size / config->rx_ring_num);
1022                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1023                         continue;
1024                 case 4:
1025                         mem_share = (mem_size / config->rx_ring_num);
1026                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1027                         continue;
1028                 case 5:
1029                         mem_share = (mem_size / config->rx_ring_num);
1030                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1031                         continue;
1032                 case 6:
1033                         mem_share = (mem_size / config->rx_ring_num);
1034                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1035                         continue;
1036                 case 7:
1037                         mem_share = (mem_size / config->rx_ring_num);
1038                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1039                         continue;
1040                 }
1041         }
1042         writeq(val64, &bar0->rx_queue_cfg);
1043
1044         /*
1045          * Filling Tx round robin registers
1046          * as per the number of FIFOs
1047          */
1048         switch (config->tx_fifo_num) {
1049         case 1:
1050                 val64 = 0x0000000000000000ULL;
1051                 writeq(val64, &bar0->tx_w_round_robin_0);
1052                 writeq(val64, &bar0->tx_w_round_robin_1);
1053                 writeq(val64, &bar0->tx_w_round_robin_2);
1054                 writeq(val64, &bar0->tx_w_round_robin_3);
1055                 writeq(val64, &bar0->tx_w_round_robin_4);
1056                 break;
1057         case 2:
1058                 val64 = 0x0000010000010000ULL;
1059                 writeq(val64, &bar0->tx_w_round_robin_0);
1060                 val64 = 0x0100000100000100ULL;
1061                 writeq(val64, &bar0->tx_w_round_robin_1);
1062                 val64 = 0x0001000001000001ULL;
1063                 writeq(val64, &bar0->tx_w_round_robin_2);
1064                 val64 = 0x0000010000010000ULL;
1065                 writeq(val64, &bar0->tx_w_round_robin_3);
1066                 val64 = 0x0100000000000000ULL;
1067                 writeq(val64, &bar0->tx_w_round_robin_4);
1068                 break;
1069         case 3:
1070                 val64 = 0x0001000102000001ULL;
1071                 writeq(val64, &bar0->tx_w_round_robin_0);
1072                 val64 = 0x0001020000010001ULL;
1073                 writeq(val64, &bar0->tx_w_round_robin_1);
1074                 val64 = 0x0200000100010200ULL;
1075                 writeq(val64, &bar0->tx_w_round_robin_2);
1076                 val64 = 0x0001000102000001ULL;
1077                 writeq(val64, &bar0->tx_w_round_robin_3);
1078                 val64 = 0x0001020000000000ULL;
1079                 writeq(val64, &bar0->tx_w_round_robin_4);
1080                 break;
1081         case 4:
1082                 val64 = 0x0001020300010200ULL;
1083                 writeq(val64, &bar0->tx_w_round_robin_0);
1084                 val64 = 0x0100000102030001ULL;
1085                 writeq(val64, &bar0->tx_w_round_robin_1);
1086                 val64 = 0x0200010000010203ULL;
1087                 writeq(val64, &bar0->tx_w_round_robin_2);
1088                 val64 = 0x0001020001000001ULL;
1089                 writeq(val64, &bar0->tx_w_round_robin_3);
1090                 val64 = 0x0203000100000000ULL;
1091                 writeq(val64, &bar0->tx_w_round_robin_4);
1092                 break;
1093         case 5:
1094                 val64 = 0x0001000203000102ULL;
1095                 writeq(val64, &bar0->tx_w_round_robin_0);
1096                 val64 = 0x0001020001030004ULL;
1097                 writeq(val64, &bar0->tx_w_round_robin_1);
1098                 val64 = 0x0001000203000102ULL;
1099                 writeq(val64, &bar0->tx_w_round_robin_2);
1100                 val64 = 0x0001020001030004ULL;
1101                 writeq(val64, &bar0->tx_w_round_robin_3);
1102                 val64 = 0x0001000000000000ULL;
1103                 writeq(val64, &bar0->tx_w_round_robin_4);
1104                 break;
1105         case 6:
1106                 val64 = 0x0001020304000102ULL;
1107                 writeq(val64, &bar0->tx_w_round_robin_0);
1108                 val64 = 0x0304050001020001ULL;
1109                 writeq(val64, &bar0->tx_w_round_robin_1);
1110                 val64 = 0x0203000100000102ULL;
1111                 writeq(val64, &bar0->tx_w_round_robin_2);
1112                 val64 = 0x0304000102030405ULL;
1113                 writeq(val64, &bar0->tx_w_round_robin_3);
1114                 val64 = 0x0001000200000000ULL;
1115                 writeq(val64, &bar0->tx_w_round_robin_4);
1116                 break;
1117         case 7:
1118                 val64 = 0x0001020001020300ULL;
1119                 writeq(val64, &bar0->tx_w_round_robin_0);
1120                 val64 = 0x0102030400010203ULL;
1121                 writeq(val64, &bar0->tx_w_round_robin_1);
1122                 val64 = 0x0405060001020001ULL;
1123                 writeq(val64, &bar0->tx_w_round_robin_2);
1124                 val64 = 0x0304050000010200ULL;
1125                 writeq(val64, &bar0->tx_w_round_robin_3);
1126                 val64 = 0x0102030000000000ULL;
1127                 writeq(val64, &bar0->tx_w_round_robin_4);
1128                 break;
1129         case 8:
1130                 val64 = 0x0001020300040105ULL;
1131                 writeq(val64, &bar0->tx_w_round_robin_0);
1132                 val64 = 0x0200030106000204ULL;
1133                 writeq(val64, &bar0->tx_w_round_robin_1);
1134                 val64 = 0x0103000502010007ULL;
1135                 writeq(val64, &bar0->tx_w_round_robin_2);
1136                 val64 = 0x0304010002060500ULL;
1137                 writeq(val64, &bar0->tx_w_round_robin_3);
1138                 val64 = 0x0103020400000000ULL;
1139                 writeq(val64, &bar0->tx_w_round_robin_4);
1140                 break;
1141         }
1142
1143         /* Filling the Rx round robin registers as per the
1144          * number of Rings and steering based on QoS.
1145          */
1146         switch (config->rx_ring_num) {
1147         case 1:
1148                 val64 = 0x8080808080808080ULL;
1149                 writeq(val64, &bar0->rts_qos_steering);
1150                 break;
1151         case 2:
1152                 val64 = 0x0000010000010000ULL;
1153                 writeq(val64, &bar0->rx_w_round_robin_0);
1154                 val64 = 0x0100000100000100ULL;
1155                 writeq(val64, &bar0->rx_w_round_robin_1);
1156                 val64 = 0x0001000001000001ULL;
1157                 writeq(val64, &bar0->rx_w_round_robin_2);
1158                 val64 = 0x0000010000010000ULL;
1159                 writeq(val64, &bar0->rx_w_round_robin_3);
1160                 val64 = 0x0100000000000000ULL;
1161                 writeq(val64, &bar0->rx_w_round_robin_4);
1162
1163                 val64 = 0x8080808040404040ULL;
1164                 writeq(val64, &bar0->rts_qos_steering);
1165                 break;
1166         case 3:
1167                 val64 = 0x0001000102000001ULL;
1168                 writeq(val64, &bar0->rx_w_round_robin_0);
1169                 val64 = 0x0001020000010001ULL;
1170                 writeq(val64, &bar0->rx_w_round_robin_1);
1171                 val64 = 0x0200000100010200ULL;
1172                 writeq(val64, &bar0->rx_w_round_robin_2);
1173                 val64 = 0x0001000102000001ULL;
1174                 writeq(val64, &bar0->rx_w_round_robin_3);
1175                 val64 = 0x0001020000000000ULL;
1176                 writeq(val64, &bar0->rx_w_round_robin_4);
1177
1178                 val64 = 0x8080804040402020ULL;
1179                 writeq(val64, &bar0->rts_qos_steering);
1180                 break;
1181         case 4:
1182                 val64 = 0x0001020300010200ULL;
1183                 writeq(val64, &bar0->rx_w_round_robin_0);
1184                 val64 = 0x0100000102030001ULL;
1185                 writeq(val64, &bar0->rx_w_round_robin_1);
1186                 val64 = 0x0200010000010203ULL;
1187                 writeq(val64, &bar0->rx_w_round_robin_2);
1188                 val64 = 0x0001020001000001ULL;  
1189                 writeq(val64, &bar0->rx_w_round_robin_3);
1190                 val64 = 0x0203000100000000ULL;
1191                 writeq(val64, &bar0->rx_w_round_robin_4);
1192
1193                 val64 = 0x8080404020201010ULL;
1194                 writeq(val64, &bar0->rts_qos_steering);
1195                 break;
1196         case 5:
1197                 val64 = 0x0001000203000102ULL;
1198                 writeq(val64, &bar0->rx_w_round_robin_0);
1199                 val64 = 0x0001020001030004ULL;
1200                 writeq(val64, &bar0->rx_w_round_robin_1);
1201                 val64 = 0x0001000203000102ULL;
1202                 writeq(val64, &bar0->rx_w_round_robin_2);
1203                 val64 = 0x0001020001030004ULL;
1204                 writeq(val64, &bar0->rx_w_round_robin_3);
1205                 val64 = 0x0001000000000000ULL;
1206                 writeq(val64, &bar0->rx_w_round_robin_4);
1207
1208                 val64 = 0x8080404020201008ULL;
1209                 writeq(val64, &bar0->rts_qos_steering);
1210                 break;
1211         case 6:
1212                 val64 = 0x0001020304000102ULL;
1213                 writeq(val64, &bar0->rx_w_round_robin_0);
1214                 val64 = 0x0304050001020001ULL;
1215                 writeq(val64, &bar0->rx_w_round_robin_1);
1216                 val64 = 0x0203000100000102ULL;
1217                 writeq(val64, &bar0->rx_w_round_robin_2);
1218                 val64 = 0x0304000102030405ULL;
1219                 writeq(val64, &bar0->rx_w_round_robin_3);
1220                 val64 = 0x0001000200000000ULL;
1221                 writeq(val64, &bar0->rx_w_round_robin_4);
1222
1223                 val64 = 0x8080404020100804ULL;
1224                 writeq(val64, &bar0->rts_qos_steering);
1225                 break;
1226         case 7:
1227                 val64 = 0x0001020001020300ULL;
1228                 writeq(val64, &bar0->rx_w_round_robin_0);
1229                 val64 = 0x0102030400010203ULL;
1230                 writeq(val64, &bar0->rx_w_round_robin_1);
1231                 val64 = 0x0405060001020001ULL;
1232                 writeq(val64, &bar0->rx_w_round_robin_2);
1233                 val64 = 0x0304050000010200ULL;
1234                 writeq(val64, &bar0->rx_w_round_robin_3);
1235                 val64 = 0x0102030000000000ULL;
1236                 writeq(val64, &bar0->rx_w_round_robin_4);
1237
1238                 val64 = 0x8080402010080402ULL;
1239                 writeq(val64, &bar0->rts_qos_steering);
1240                 break;
1241         case 8:
1242                 val64 = 0x0001020300040105ULL;
1243                 writeq(val64, &bar0->rx_w_round_robin_0);
1244                 val64 = 0x0200030106000204ULL;
1245                 writeq(val64, &bar0->rx_w_round_robin_1);
1246                 val64 = 0x0103000502010007ULL;
1247                 writeq(val64, &bar0->rx_w_round_robin_2);
1248                 val64 = 0x0304010002060500ULL;
1249                 writeq(val64, &bar0->rx_w_round_robin_3);
1250                 val64 = 0x0103020400000000ULL;
1251                 writeq(val64, &bar0->rx_w_round_robin_4);
1252
1253                 val64 = 0x8040201008040201ULL;
1254                 writeq(val64, &bar0->rts_qos_steering);
1255                 break;
1256         }
1257
1258         /* UDP Fix */
1259         val64 = 0;
1260         for (i = 0; i < 8; i++)
1261                 writeq(val64, &bar0->rts_frm_len_n[i]);
1262
1263         /* Set the default rts frame length for the rings configured */
1264         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1265         for (i = 0 ; i < config->rx_ring_num ; i++)
1266                 writeq(val64, &bar0->rts_frm_len_n[i]);
1267
1268         /* Set the frame length for the configured rings
1269          * desired by the user
1270          */
1271         for (i = 0; i < config->rx_ring_num; i++) {
1272                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1273                  * specified frame length steering.
1274                  * If the user provides the frame length then program
1275                  * the rts_frm_len register for those values or else
1276                  * leave it as it is.
1277                  */
1278                 if (rts_frm_len[i] != 0) {
1279                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1280                                 &bar0->rts_frm_len_n[i]);
1281                 }
1282         }
1283
1284         /* Program statistics memory */
1285         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1286
1287         if (nic->device_type == XFRAME_II_DEVICE) {
1288                 val64 = STAT_BC(0x320);
1289                 writeq(val64, &bar0->stat_byte_cnt);
1290         }
1291
1292         /*
1293          * Initializing the sampling rate for the device to calculate the
1294          * bandwidth utilization.
1295          */
1296         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1297             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1298         writeq(val64, &bar0->mac_link_util);
1299
1300
1301         /*
1302          * Initializing the Transmit and Receive Traffic Interrupt
1303          * Scheme.
1304          */
1305         /*
1306          * TTI Initialization. Default Tx timer gets us about
1307          * 250 interrupts per sec. Continuous interrupts are enabled
1308          * by default.
1309          */
1310         if (nic->device_type == XFRAME_II_DEVICE) {
1311                 int count = (nic->config.bus_speed * 125)/2;
1312                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1313         } else {
1314
1315                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1316         }
1317         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1318             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1319             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1320                 if (use_continuous_tx_intrs)
1321                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1322         writeq(val64, &bar0->tti_data1_mem);
1323
1324         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1325             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1326             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1327         writeq(val64, &bar0->tti_data2_mem);
1328
1329         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1330         writeq(val64, &bar0->tti_command_mem);
1331
1332         /*
1333          * Once the operation completes, the Strobe bit of the command
1334          * register will be reset. We poll for this particular condition
1335          * We wait for a maximum of 500ms for the operation to complete,
1336          * if it's not complete by then we return error.
1337          */
1338         time = 0;
1339         while (TRUE) {
1340                 val64 = readq(&bar0->tti_command_mem);
1341                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1342                         break;
1343                 }
1344                 if (time > 10) {
1345                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1346                                   dev->name);
1347                         return -1;
1348                 }
1349                 msleep(50);
1350                 time++;
1351         }
1352
1353         if (nic->config.bimodal) {
1354                 int k = 0;
1355                 for (k = 0; k < config->rx_ring_num; k++) {
1356                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1357                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1358                         writeq(val64, &bar0->tti_command_mem);
1359
1360                 /*
1361                  * Once the operation completes, the Strobe bit of the command
1362                  * register will be reset. We poll for this particular condition
1363                  * We wait for a maximum of 500ms for the operation to complete,
1364                  * if it's not complete by then we return error.
1365                 */
1366                         time = 0;
1367                         while (TRUE) {
1368                                 val64 = readq(&bar0->tti_command_mem);
1369                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1370                                         break;
1371                                 }
1372                                 if (time > 10) {
1373                                         DBG_PRINT(ERR_DBG,
1374                                                 "%s: TTI init Failed\n",
1375                                         dev->name);
1376                                         return -1;
1377                                 }
1378                                 time++;
1379                                 msleep(50);
1380                         }
1381                 }
1382         } else {
1383
1384                 /* RTI Initialization */
1385                 if (nic->device_type == XFRAME_II_DEVICE) {
1386                         /*
1387                          * Programmed to generate Apprx 500 Intrs per
1388                          * second
1389                          */
1390                         int count = (nic->config.bus_speed * 125)/4;
1391                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1392                 } else {
1393                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1394                 }
1395                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1396                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1397                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1398
1399                 writeq(val64, &bar0->rti_data1_mem);
1400
1401                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1402                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1403                 if (nic->intr_type == MSI_X)
1404                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1405                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1406                 else
1407                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1408                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1409                 writeq(val64, &bar0->rti_data2_mem);
1410
1411                 for (i = 0; i < config->rx_ring_num; i++) {
1412                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1413                                         | RTI_CMD_MEM_OFFSET(i);
1414                         writeq(val64, &bar0->rti_command_mem);
1415
1416                         /*
1417                          * Once the operation completes, the Strobe bit of the
1418                          * command register will be reset. We poll for this
1419                          * particular condition. We wait for a maximum of 500ms
1420                          * for the operation to complete, if it's not complete
1421                          * by then we return error.
1422                          */
1423                         time = 0;
1424                         while (TRUE) {
1425                                 val64 = readq(&bar0->rti_command_mem);
1426                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1427                                         break;
1428                                 }
1429                                 if (time > 10) {
1430                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1431                                                   dev->name);
1432                                         return -1;
1433                                 }
1434                                 time++;
1435                                 msleep(50);
1436                         }
1437                 }
1438         }
1439
1440         /*
1441          * Initializing proper values as Pause threshold into all
1442          * the 8 Queues on Rx side.
1443          */
1444         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1445         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1446
1447         /* Disable RMAC PAD STRIPPING */
1448         add = &bar0->mac_cfg;
1449         val64 = readq(&bar0->mac_cfg);
1450         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1451         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1452         writel((u32) (val64), add);
1453         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1454         writel((u32) (val64 >> 32), (add + 4));
1455         val64 = readq(&bar0->mac_cfg);
1456
1457         /*
1458          * Set the time value to be inserted in the pause frame
1459          * generated by xena.
1460          */
1461         val64 = readq(&bar0->rmac_pause_cfg);
1462         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1463         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1464         writeq(val64, &bar0->rmac_pause_cfg);
1465
1466         /*
1467          * Set the Threshold Limit for Generating the pause frame
1468          * If the amount of data in any Queue exceeds ratio of
1469          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1470          * pause frame is generated
1471          */
1472         val64 = 0;
1473         for (i = 0; i < 4; i++) {
1474                 val64 |=
1475                     (((u64) 0xFF00 | nic->mac_control.
1476                       mc_pause_threshold_q0q3)
1477                      << (i * 2 * 8));
1478         }
1479         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1480
1481         val64 = 0;
1482         for (i = 0; i < 4; i++) {
1483                 val64 |=
1484                     (((u64) 0xFF00 | nic->mac_control.
1485                       mc_pause_threshold_q4q7)
1486                      << (i * 2 * 8));
1487         }
1488         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1489
1490         /*
1491          * TxDMA will stop Read request if the number of read split has
1492          * exceeded the limit pointed by shared_splits
1493          */
1494         val64 = readq(&bar0->pic_control);
1495         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1496         writeq(val64, &bar0->pic_control);
1497
1498         /*
1499          * Programming the Herc to split every write transaction
1500          * that does not start on an ADB to reduce disconnects.
1501          */
1502         if (nic->device_type == XFRAME_II_DEVICE) {
1503                 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1504                 writeq(val64, &bar0->wreq_split_mask);
1505         }
1506
1507         /* Setting Link stability period to 64 ms */ 
1508         if (nic->device_type == XFRAME_II_DEVICE) {
1509                 val64 = MISC_LINK_STABILITY_PRD(3);
1510                 writeq(val64, &bar0->misc_control);
1511         }
1512
1513         return SUCCESS;
1514 }
1515 #define LINK_UP_DOWN_INTERRUPT          1
1516 #define MAC_RMAC_ERR_TIMER              2
1517
1518 int s2io_link_fault_indication(nic_t *nic)
1519 {
1520         if (nic->intr_type != INTA)
1521                 return MAC_RMAC_ERR_TIMER;
1522         if (nic->device_type == XFRAME_II_DEVICE)
1523                 return LINK_UP_DOWN_INTERRUPT;
1524         else
1525                 return MAC_RMAC_ERR_TIMER;
1526 }
1527
1528 /**
1529  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1530  *  @nic: device private variable,
1531  *  @mask: A mask indicating which Intr block must be modified and,
1532  *  @flag: A flag indicating whether to enable or disable the Intrs.
1533  *  Description: This function will either disable or enable the interrupts
1534  *  depending on the flag argument. The mask argument can be used to
1535  *  enable/disable any Intr block.
1536  *  Return Value: NONE.
1537  */
1538
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        register u64 val64 = 0, temp64 = 0;

        /*
         * Each section below follows the same two-level pattern:
         * enable clears this block's bit(s) in general_int_mask and then
         * programs the block-level mask register; disable writes
         * DISABLE_ALL_INTRS into the block-level mask and sets the
         * block's bit(s) back in general_int_mask.
         */
        /*  Top level interrupt classification */
        /*  PIC Interrupts */
        if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
                /*  Enable PIC Intrs in the general intr mask register */
                val64 = TXPIC_INT_M | PIC_RX_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * If Hercules adapter enable GPIO otherwise
                         * disabled all PCIX, Flash, MDIO, IIC and GPIO
                         * interrupts for now.
                         * TODO
                         */
                        if (s2io_link_fault_indication(nic) ==
                                        LINK_UP_DOWN_INTERRUPT ) {
                                /* Unmask only the GPIO link up/down source. */
                                temp64 = readq(&bar0->pic_int_mask);
                                temp64 &= ~((u64) PIC_INT_GPIO);
                                writeq(temp64, &bar0->pic_int_mask);
                                temp64 = readq(&bar0->gpio_int_mask);
                                temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
                                writeq(temp64, &bar0->gpio_int_mask);
                        } else {
                                writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                        }
                        /*
                         * No MSI Support is available presently, so TTI and
                         * RTI interrupts are also disabled.
                         */
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable PIC Intrs in the general
                         * intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  DMA Interrupts */
        /*  Enabling/Disabling Tx DMA interrupts */
        if (mask & TX_DMA_INTR) {
                /* Enable TxDMA Intrs in the general intr mask register */
                val64 = TXDMA_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * Keep all interrupts other than PFC interrupt
                         * and PCC interrupt disabled in DMA level.
                         */
                        val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
                                                      TXDMA_PCC_INT_M);
                        writeq(val64, &bar0->txdma_int_mask);
                        /*
                         * Enable only the MISC error 1 interrupt in PFC block
                         */
                        val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
                        writeq(val64, &bar0->pfc_err_mask);
                        /*
                         * Enable only the FB_ECC error interrupt in PCC block
                         */
                        val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
                        writeq(val64, &bar0->pcc_err_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable TxDMA Intrs in the general intr mask
                         * register
                         */
                        /* NOTE(review): pcc_err_mask is not re-masked here,
                         * only pfc_err_mask - confirm this asymmetry with the
                         * enable path is intentional. */
                        writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
                        writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  Enabling/Disabling Rx DMA interrupts */
        if (mask & RX_DMA_INTR) {
                /*  Enable RxDMA Intrs in the general intr mask register */
                val64 = RXDMA_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * All RxDMA block interrupts are disabled for now
                         * TODO
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable RxDMA Intrs in the general intr mask
                         * register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  MAC Interrupts */
        /*  Enabling/Disabling MAC interrupts */
        if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
                val64 = TXMAC_INT_M | RXMAC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * All MAC block error interrupts are disabled for now
                         * TODO
                         */
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable MAC Intrs in the general intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
                        writeq(DISABLE_ALL_INTRS,
                               &bar0->mac_rmac_err_mask);

                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  XGXS Interrupts */
        if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
                val64 = TXXGXS_INT_M | RXXGXS_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * All XGXS block error interrupts are disabled for now
                         * TODO
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable XGXS Intrs in the general intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  Memory Controller(MC) interrupts */
        if (mask & MC_INTR) {
                val64 = MC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * Enable all MC Intrs. (0 in a mask register
                         * unmasks every source in the block.)
                         */
                        writeq(0x0, &bar0->mc_int_mask);
                        writeq(0x0, &bar0->mc_err_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable MC Intrs in the general intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }


        /*  Tx traffic interrupts */
        if (mask & TX_TRAFFIC_INTR) {
                val64 = TXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * Enable all the Tx side interrupts
                         * writing 0 Enables all 64 TX interrupt levels
                         */
                        writeq(0x0, &bar0->tx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Tx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  Rx traffic interrupts */
        if (mask & RX_TRAFFIC_INTR) {
                val64 = RXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /* writing 0 Enables all 8 RX interrupt levels */
                        writeq(0x0, &bar0->rx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Rx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }
}
1768
1769 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1770 {
1771         int ret = 0;
1772
1773         if (flag == FALSE) {
1774                 if ((!herc && (rev_id >= 4)) || herc) {
1775                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1776                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1777                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1778                                 ret = 1;
1779                         }
1780                 }else {
1781                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1782                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1783                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1784                                 ret = 1;
1785                         }
1786                 }
1787         } else {
1788                 if ((!herc && (rev_id >= 4)) || herc) {
1789                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1790                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1791                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1792                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1793                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1794                                 ret = 1;
1795                         }
1796                 } else {
1797                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1798                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1799                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1800                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1801                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1802                                 ret = 1;
1803                         }
1804                 }
1805         }
1806
1807         return ret;
1808 }
1809 /**
1810  *  verify_xena_quiescence - Checks whether the H/W is ready
1811  *  @val64 :  Value read from adapter status register.
1812  *  @flag : indicates if the adapter enable bit was ever written once
1813  *  before.
1814  *  Description: Returns whether the H/W is ready to go or not. Depending
1815  *  on whether adapter enable bit was written or not the comparison
1816  *  differs and the calling function passes the input argument flag to
1817  *  indicate this.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
1820  */
1821
1822 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1823 {
1824         int ret = 0, herc;
1825         u64 tmp64 = ~((u64) val64);
1826         int rev_id = get_xena_rev_id(sp->pdev);
1827
1828         herc = (sp->device_type == XFRAME_II_DEVICE);
1829         if (!
1830             (tmp64 &
1831              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1832               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1833               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1834               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1835               ADAPTER_STATUS_P_PLL_LOCK))) {
1836                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1837         }
1838
1839         return ret;
1840 }
1841
1842 /**
1843  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
1845  * Description :
1846  * New procedure to clear mac address reading  problems on Alpha platforms
1847  *
1848  */
1849
1850 void fix_mac_address(nic_t * sp)
1851 {
1852         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1853         u64 val64;
1854         int i = 0;
1855
1856         while (fix_mac[i] != END_SIGN) {
1857                 writeq(fix_mac[i++], &bar0->gpio_control);
1858                 udelay(10);
1859                 val64 = readq(&bar0->gpio_control);
1860         }
1861 }
1862
1863 /**
1864  *  start_nic - Turns the device on
1865  *  @nic : device private variable.
1866  *  Description:
1867  *  This function actually turns the device on. Before this  function is
1868  *  called,all Registers are configured from their reset states
1869  *  and shared memory is allocated but the NIC is still quiescent. On
1870  *  calling this function, the device interrupts are cleared and the NIC is
1871  *  literally switched on by writing into the adapter control register.
1872  *  Return Value:
1873  *  SUCCESS on success and -1 on failure.
1874  */
1875
static int start_nic(struct s2io_nic *nic)
{
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        struct net_device *dev = nic->dev;
        register u64 val64 = 0;
        u16 interruptible;
        u16 subid, i;
        mac_info_t *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /*  PRC Initialization and configuration */
        /* Point each receive ring's PRC at its first descriptor block
         * and enable the ring controller. */
        for (i = 0; i < config->rx_ring_num; i++) {
                writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
                       &bar0->prc_rxd0_n[i]);

                val64 = readq(&bar0->prc_ctrl_n[i]);
                if (nic->config.bimodal)
                        val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
#ifndef CONFIG_2BUFF_MODE
                val64 |= PRC_CTRL_RC_ENABLED;
#else
                val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
#endif
                writeq(val64, &bar0->prc_ctrl_n[i]);
        }

#ifdef CONFIG_2BUFF_MODE
        /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
        val64 = readq(&bar0->rx_pa_cfg);
        val64 |= RX_PA_CFG_IGNORE_L2_ERR;
        writeq(val64, &bar0->rx_pa_cfg);
#endif

        /*
         * Enabling MC-RLDRAM. After enabling the device, we timeout
         * for around 100ms, which is approximately the time required
         * for the device to be ready for operation.
         */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
        val64 = readq(&bar0->mc_rldram_mrs);

        msleep(100);    /* Delay by around 100 ms. */

        /* Enabling ECC Protection. */
        /* NOTE(review): this *clears* ADAPTER_ECC_EN, so either the bit
         * is active-low or the comment above is stale - confirm against
         * the Xena register specification. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /*
         * Clearing any possible Link state change interrupts that
         * could have popped up just before Enabling the card.
         */
        val64 = readq(&bar0->mac_rmac_err_reg);
        if (val64)
                writeq(val64, &bar0->mac_rmac_err_reg);

        /*
         * Verify if the device is ready to be enabled, if so enable
         * it.
         */
        val64 = readq(&bar0->adapter_status);
        if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
                DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
                DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        /*  Enable select interrupts */
        /* MSI/MSI-X paths mask everything here; INTA enables the
         * traffic, PIC and MAC interrupt groups. */
        if (nic->intr_type != INTA)
                en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
        else {
                interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
                interruptible |= TX_PIC_INTR | RX_PIC_INTR;
                interruptible |= TX_MAC_INTR | RX_MAC_INTR;
                en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
        }

        /*
         * With some switches, link might be already up at this point.
         * Because of this weird behavior, when we enable laser,
         * we may not get link. We need to handle this. We cannot
         * figure out which switch is misbehaving. So we are forced to
         * make a global change.
         */

        /* Enabling Laser. */
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_EOI_TX_ON;
        writeq(val64, &bar0->adapter_control);

        /* SXE-002: Initialize link and activity LED */
        subid = nic->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (nic->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Don't see link state interrupts on certain switches, so
         * directly scheduling a link state task from here.
         */
        schedule_work(&nic->set_link_task);

        return SUCCESS;
}
1991
1992 /**
1993  *  free_tx_buffers - Free all queued Tx buffers
1994  *  @nic : device private variable.
1995  *  Description:
1996  *  Free all queued Tx buffers.
1997  *  Return Value: void
1998 */
1999
2000 static void free_tx_buffers(struct s2io_nic *nic)
2001 {
2002         struct net_device *dev = nic->dev;
2003         struct sk_buff *skb;
2004         TxD_t *txdp;
2005         int i, j;
2006         mac_info_t *mac_control;
2007         struct config_param *config;
2008         int cnt = 0, frg_cnt;
2009
2010         mac_control = &nic->mac_control;
2011         config = &nic->config;
2012
2013         for (i = 0; i < config->tx_fifo_num; i++) {
2014                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2015                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2016                             list_virt_addr;
2017                         skb =
2018                             (struct sk_buff *) ((unsigned long) txdp->
2019                                                 Host_Control);
2020                         if (skb == NULL) {
2021                                 memset(txdp, 0, sizeof(TxD_t) *
2022                                        config->max_txds);
2023                                 continue;
2024                         }
2025                         frg_cnt = skb_shinfo(skb)->nr_frags;
2026                         pci_unmap_single(nic->pdev, (dma_addr_t)
2027                                          txdp->Buffer_Pointer,
2028                                          skb->len - skb->data_len,
2029                                          PCI_DMA_TODEVICE);
2030                         if (frg_cnt) {
2031                                 TxD_t *temp;
2032                                 temp = txdp;
2033                                 txdp++;
2034                                 for (j = 0; j < frg_cnt; j++, txdp++) {
2035                                         skb_frag_t *frag =
2036                                             &skb_shinfo(skb)->frags[j];
2037                                         pci_unmap_page(nic->pdev,
2038                                                        (dma_addr_t)
2039                                                        txdp->
2040                                                        Buffer_Pointer,
2041                                                        frag->size,
2042                                                        PCI_DMA_TODEVICE);
2043                                 }
2044                                 txdp = temp;
2045                         }
2046                         dev_kfree_skb(skb);
2047                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2048                         cnt++;
2049                 }
2050                 DBG_PRINT(INTR_DBG,
2051                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2052                           dev->name, cnt, i);
2053                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2054                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2055         }
2056 }
2057
2058 /**
2059  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2061  *   Description:
2062  *   This function does exactly the opposite of what the start_nic()
2063  *   function does. This function is called to stop the device.
2064  *   Return Value:
2065  *   void.
2066  */
2067
2068 static void stop_nic(struct s2io_nic *nic)
2069 {
2070         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2071         register u64 val64 = 0;
2072         u16 interruptible, i;
2073         mac_info_t *mac_control;
2074         struct config_param *config;
2075
2076         mac_control = &nic->mac_control;
2077         config = &nic->config;
2078
2079         /*  Disable all interrupts */
2080         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2081         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2082         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2083         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2084
2085         /*  Disable PRCs */
2086         for (i = 0; i < config->rx_ring_num; i++) {
2087                 val64 = readq(&bar0->prc_ctrl_n[i]);
2088                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2089                 writeq(val64, &bar0->prc_ctrl_n[i]);
2090         }
2091 }
2092
2093 /**
2094  *  fill_rx_buffers - Allocates the Rx side skbs
2095  *  @nic:  device private variable
2096  *  @ring_no: ring number
2097  *  Description:
2098  *  The function allocates Rx side skbs and puts the physical
2099  *  address of these buffers into the RxD buffer pointers, so that the NIC
2100  *  can DMA the received frame into these locations.
2101  *  The NIC supports 3 receive modes, viz
2102  *  1. single buffer,
2103  *  2. three buffer and
2104  *  3. Five buffer modes.
2105  *  Each mode defines how many fragments the received frame will be split
2106  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2107  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2108  *  is split into 3 fragments. As of now only single buffer mode is
2109  *  supported.
2110  *   Return Value:
2111  *  SUCCESS on success or an appropriate -ve value on failure.
2112  */
2113
int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	int offset, offset1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
	RxD_t *rxdpnext;
	int nextblk;
	u64 tmp;
	buffAdd_t *ba;
	dma_addr_t rxdpphys;
#endif
#ifndef CONFIG_S2IO_NAPI
	unsigned long flags;
#endif
	/* First RxD of the current replenish batch; its ownership bit is
	 * flipped last, after a wmb(), so the NIC never sees a batch whose
	 * buffer pointers are not yet visible in memory. */
	RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of RxDs that currently lack an skb and need refilling. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);
	/* Worst-case frame size for single-buffer mode allocations. */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
	    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;

	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
		/* Linearized put/get positions over all blocks; 1-buffer mode
		 * keeps one extra slot per block for the END_OF_BLOCK marker. */
#ifndef CONFIG_2BUFF_MODE
		offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
#else
		offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
#endif

		rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
		    block_virt_addr + off;
		/* Put caught up with get while the RxD still holds an skb:
		 * ring is full, nothing more to replenish. */
		if ((offset == offset1) && (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
#ifndef CONFIG_2BUFF_MODE
		/* End of block: advance to the next block; Control_2 of the
		 * marker descriptor holds the next block's virtual address. */
		if (rxdp->Control_1 == END_OF_BLOCK) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index %= mac_control->rings[ring_no].block_count;
			block_no = mac_control->rings[ring_no].rx_curr_put_info.
				block_index;
			off++;
			off %= (MAX_RXDS_PER_BLOCK + 1);
			mac_control->rings[ring_no].rx_curr_put_info.offset =
			    off;
			rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
#ifndef CONFIG_S2IO_NAPI
		/* put_pos is read under put_lock by rx_intr_handler in the
		 * non-NAPI path; publish the new position atomically. */
		spin_lock_irqsave(&nic->put_lock, flags);
		mac_control->rings[ring_no].put_pos =
		    (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
		spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
#else
		/* 2-buffer mode: the END_OF_BLOCK marker lives in
		 * Host_Control; wrap to offset 0 of the next block. */
		if (rxdp->Host_Control == END_OF_BLOCK) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			mac_control->rings[ring_no].rx_curr_put_info.block_index
			    %= mac_control->rings[ring_no].block_count;
			block_no = mac_control->rings[ring_no].rx_curr_put_info
			    .block_index;
			off = 0;
			DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
				  dev->name, block_no,
				  (unsigned long long) rxdp->Control_1);
			mac_control->rings[ring_no].rx_curr_put_info.offset =
			    off;
			rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
			    block_virt_addr;
		}
#ifndef CONFIG_S2IO_NAPI
		spin_lock_irqsave(&nic->put_lock, flags);
		mac_control->rings[ring_no].put_pos = (block_no *
					 (MAX_RXDS_PER_BLOCK + 1)) + off;
		spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
#endif

		/* Stop if the NIC still owns this descriptor (1-buffer mode:
		 * RXD_OWN_XENA; 2-buffer mode: Buffer_Empty bit). */
#ifndef CONFIG_2BUFF_MODE
		if (rxdp->Control_1 & RXD_OWN_XENA)
#else
		if (rxdp->Control_2 & BIT(0))
#endif
		{
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			goto end;
		}
#ifdef  CONFIG_2BUFF_MODE
		/*
		 * RxDs Spanning cache lines will be replenished only
		 * if the succeeding RxD is also owned by Host. It
		 * will always be the ((8*i)+3) and ((8*i)+6)
		 * descriptors for the 48 byte descriptor. The offending
		 * descriptor is of course the 3rd descriptor.
		 */
		rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
		    block_dma_addr + (off * sizeof(RxD_t));
		if (((u64) (rxdpphys)) % 128 > 80) {
			rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
			    block_virt_addr + (off + 1);
			if (rxdpnext->Host_Control == END_OF_BLOCK) {
				nextblk = (block_no + 1) %
				    (mac_control->rings[ring_no].block_count);
				rxdpnext = mac_control->rings[ring_no].rx_blocks
				    [nextblk].block_virt_addr;
			}
			if (rxdpnext->Control_2 & BIT(0))
				goto end;
		}
#endif

#ifndef CONFIG_2BUFF_MODE
		skb = dev_alloc_skb(size + NET_IP_ALIGN);
#else
		skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
#endif
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			/* Hand the batch accumulated so far to the NIC before
			 * bailing, so those buffers are not stranded. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
#ifndef CONFIG_2BUFF_MODE
		skb_reserve(skb, NET_IP_ALIGN);
		memset(rxdp, 0, sizeof(RxD_t));
		rxdp->Buffer0_ptr = pci_map_single
		    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
		rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
		rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
		rxdp->Host_Control = (unsigned long) (skb);
		/* All but the first RxD of each 2^rxsync_frequency batch are
		 * handed to the NIC immediately; the first is deferred. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		off %= (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#else
		ba = &mac_control->rings[ring_no].ba[block_no][off];
		skb_reserve(skb, BUF0_LEN);
		/* Align skb->data to (ALIGN_SIZE + 1) boundary for buffer 2. */
		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
		if (tmp)
			skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);

		memset(rxdp, 0, sizeof(RxD_t));
		rxdp->Buffer2_ptr = pci_map_single
		    (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
		     PCI_DMA_FROMDEVICE);
		rxdp->Buffer0_ptr =
		    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
				   PCI_DMA_FROMDEVICE);
		rxdp->Buffer1_ptr =
		    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
				   PCI_DMA_FROMDEVICE);

		rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
		rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE(1);	/* dummy. */
		rxdp->Control_2 |= BIT(0);	/* Set Buffer_Empty bit. */
		rxdp->Host_Control = (u64) ((unsigned long) (skb));
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#endif
		rxdp->Control_2 |= SET_RXD_MARKER;

		/* Batch boundary: release the previous batch's first RxD
		 * (after wmb) and remember this RxD as the new batch head. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
2327
2328 /**
2329  *  free_rx_buffers - Frees all Rx buffers
2330  *  @sp: device private variable.
2331  *  Description:
2332  *  This function will free all Rx buffers allocated by host.
2333  *  Return Value:
2334  *  NONE.
2335  */
2336
static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, j, blk = 0, off, buf_cnt = 0;
	RxD_t *rxdp;
	struct sk_buff *skb;
	mac_info_t *mac_control;
	struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
	buffAdd_t *ba;
#endif

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Walk every RxD of every ring, unmap its DMA buffers, free the
	 * attached skb, and reset the ring bookkeeping to offset 0. */
	for (i = 0; i < config->rx_ring_num; i++) {
		for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
			off = j % (MAX_RXDS_PER_BLOCK + 1);
			rxdp = mac_control->rings[i].rx_blocks[blk].
				block_virt_addr + off;

#ifndef CONFIG_2BUFF_MODE
			/* 1-buffer mode: Control_2 of the END_OF_BLOCK marker
			 * points at the next block's first descriptor. */
			if (rxdp->Control_1 == END_OF_BLOCK) {
				rxdp =
				    (RxD_t *) ((unsigned long) rxdp->
					       Control_2);
				j++;
				blk++;
			}
#else
			/* 2-buffer mode: marker carries no next pointer; just
			 * step to the next block. */
			if (rxdp->Host_Control == END_OF_BLOCK) {
				blk++;
				continue;
			}
#endif

			/* Descriptor not handed to the NIC: no skb attached,
			 * just clear it. */
			if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
				memset(rxdp, 0, sizeof(RxD_t));
				continue;
			}

			skb =
			    (struct sk_buff *) ((unsigned long) rxdp->
						Host_Control);
			if (skb) {
#ifndef CONFIG_2BUFF_MODE
				pci_unmap_single(sp->pdev, (dma_addr_t)
						 rxdp->Buffer0_ptr,
						 dev->mtu +
						 HEADER_ETHERNET_II_802_3_SIZE
						 + HEADER_802_2_SIZE +
						 HEADER_SNAP_SIZE,
						 PCI_DMA_FROMDEVICE);
#else
				/* Unmap all three buffers set up by
				 * fill_rx_buffers in 2-buffer mode. */
				ba = &mac_control->rings[i].ba[blk][off];
				pci_unmap_single(sp->pdev, (dma_addr_t)
						 rxdp->Buffer0_ptr,
						 BUF0_LEN,
						 PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev, (dma_addr_t)
						 rxdp->Buffer1_ptr,
						 BUF1_LEN,
						 PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev, (dma_addr_t)
						 rxdp->Buffer2_ptr,
						 dev->mtu + BUF0_LEN + 4,
						 PCI_DMA_FROMDEVICE);
#endif
				dev_kfree_skb(skb);
				atomic_dec(&sp->rx_bufs_left[i]);
				buf_cnt++;
			}
			memset(rxdp, 0, sizeof(RxD_t));
		}
		/* Ring is empty again: rewind put/get positions. */
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
2420
2421 /**
2422  * s2io_poll - Rx interrupt handler for NAPI support
2423  * @dev : pointer to the device structure.
2424  * @budget : The number of packets that were budgeted to be processed
2425  * during  one pass through the 'Poll" function.
2426  * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context.
 * It will also process only a given number of packets.
2430  * Return value:
2431  * 0 on success and 1 if there are No Rx packets to be processed.
2432  */
2433
#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Quota for this poll is the smaller of *budget and dev->quota;
	 * rx_intr_handler decrements nic->pkts_to_process as it works. */
	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Ack pending Rx interrupts by writing the register back. */
	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	/* Report at least one packet so the budget always shrinks. */
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	/* Replenish the Rx rings with fresh buffers before re-enabling
	 * interrupts. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	/* Quota exhausted: stay on the poll list (return 1), leave Rx
	 * interrupts disabled, but still refill the rings. */
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
#endif
2499
2500 /**
2501  *  rx_intr_handler - Rx interrupt handler
2502  *  @nic: device private variable.
2503  *  Description:
2504  *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh, as yet un-processed frames, this function is
2506  *  called. It picks out the RxD at which place the last Rx processing had
2507  *  stopped and sends the skb to the OSM's Rx handler and then increments
2508  *  the offset.
2509  *  Return Value:
2510  *  NONE.
2511  */
static void rx_intr_handler(ring_info_t *ring_data)
{
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, get_offset, put_block, put_offset, ring_bufs;
	rx_curr_get_info_t get_info, put_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
	int pkt_cnt = 0;
#endif
	spin_lock(&nic->rx_lock);
	/* Bail out if the card is being reset/brought down. */
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	ring_bufs = get_info.ring_len+1;
	rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
		    get_info.offset;
	/* Linearized get position across all blocks (one extra slot per
	 * block for the END_OF_BLOCK marker). */
	get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
		get_info.offset;
#ifndef CONFIG_S2IO_NAPI
	/* Non-NAPI: put_pos is published by fill_rx_buffers under put_lock. */
	spin_lock(&nic->put_lock);
	put_offset = ring_data->put_pos;
	spin_unlock(&nic->put_lock);
#else
	put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
		put_info.offset;
#endif
	/* Process descriptors the NIC has handed back, stopping one short
	 * of the put position. */
	while (RXD_IS_UP2DT(rxdp) &&
	       (((get_offset + 1) % ring_bufs) != put_offset)) {
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Unmap exactly what fill_rx_buffers mapped for this mode. */
#ifndef CONFIG_2BUFF_MODE
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 rxdp->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
#else
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 rxdp->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 rxdp->Buffer1_ptr,
				 BUF1_LEN, PCI_DMA_FROMDEVICE);
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 rxdp->Buffer2_ptr,
				 dev->mtu + BUF0_LEN + 4,
				 PCI_DMA_FROMDEVICE);
#endif
		/* Hand the frame to the upper-layer receive handler. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset =
		    get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
		    get_info.offset;
		/* Wrap into the next block at the block boundary. */
		if (get_info.offset &&
		    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset
			    = get_info.offset;
			get_block++;
			get_block %= ring_data->block_count;
			ring_data->rx_curr_get_info.block_index
			    = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
			    get_info.offset;
#ifdef CONFIG_S2IO_NAPI
		/* NAPI: stop once this poll's quota is consumed. */
		nic->pkts_to_process -= 1;
		if (!nic->pkts_to_process)
			break;
#else
		/* Non-NAPI: optional cap on packets handled per interrupt. */
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
#endif
	}
	spin_unlock(&nic->rx_lock);
}
2610
2611 /**
2612  *  tx_intr_handler - Transmit interrupt handler
2613  *  @nic : device private variable
2614  *  Description:
2615  *  If an interrupt was raised to indicate DMA complete of the
2616  *  Tx packet, this function is called. It identifies the last TxD
2617  *  whose buffer was freed and frees all skbs whose data have already
2618  *  DMA'ed into the NICs internal memory.
2619  *  Return Value:
2620  *  NONE
2621  */
2622
static void tx_intr_handler(fifo_info_t *fifo_data)
{
	nic_t *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	TxD_t *txdlp;
	u16 j, frg_cnt;

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reclaim TxD lists the NIC has finished with, until we catch up
	 * with the put position or hit a descriptor still owned by hw. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			/* T_CODE 0xA means the frame was returned because
			 * the link dropped; anything else is a real error. */
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
			}
			else {
				DBG_PRINT(ERR_DBG, "***TxD error \
%llx\n", err);
			}
		}

		skb = (struct sk_buff *) ((unsigned long)
				txdlp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			/* NOTE(review): this return skips the queue-wake at
			 * the bottom of the function — confirm intended. */
			return;
		}

		frg_cnt = skb_shinfo(skb)->nr_frags;
		nic->tx_pkt_count++;

		/* First TxD maps the linear part of the skb. */
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txdlp->Buffer_Pointer,
				 skb->len - skb->data_len,
				 PCI_DMA_TODEVICE);
		if (frg_cnt) {
			TxD_t *temp;
			temp = txdlp;
			txdlp++;
			/* Subsequent TxDs map the paged fragments; a zero
			 * Buffer_Pointer marks the end of the used TxDs. */
			for (j = 0; j < frg_cnt; j++, txdlp++) {
				skb_frag_t *frag =
				    &skb_shinfo(skb)->frags[j];
				if (!txdlp->Buffer_Pointer)
					break;
				pci_unmap_page(nic->pdev,
					       (dma_addr_t)
					       txdlp->
					       Buffer_Pointer,
					       frag->size,
					       PCI_DMA_TODEVICE);
			}
			txdlp = temp;
		}
		/* Clear the whole TxD list so it can be reused. */
		memset(txdlp, 0,
		       (sizeof(TxD_t) * fifo_data->max_txds));

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		get_info.offset %= get_info.fifo_len + 1;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were freed; restart the queue if it was stopped. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
2707
2708 /**
 *  alarm_intr_handler - Alarm Interrupt handler
2710  *  @nic: device private variable
 *  Description: If the interrupt was neither because of a received frame
 *  nor a Tx completion, this function is called. If the interrupt was to
 *  indicate a loss of link, the OSM link status handler is invoked; for any
 *  other alarm interrupt the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
2716  *  Return Value:
2717  *  NONE
2718 */
2719
static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/* Reading the register acks nothing by itself; writing the
		 * value back clears the latched error bits. */
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
				}
			}
		} else {
			/* Single-bit ECC errors are corrected by hw; just
			 * count them. */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n", 
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* Read back to flush the posted write before the link task
		 * re-enables the adapter. */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}

	/* Other type of interrupts are not being handled now,  TODO */
}
2787
2788 /**
2789  *  wait_for_cmd_complete - waits for a command to complete.
2790  *  @sp : private member of the device structure, which is a pointer to the
2791  *  s2io_nic structure.
2792  *  Description: Function that waits for a command to Write into RMAC
2793  *  ADDR DATA registers to be completed and returns either success or
2794  *  error depending on whether the command was complete or not.
2795  *  Return value:
2796  *   SUCCESS on success and FAILURE on failure.
2797  */
2798
2799 int wait_for_cmd_complete(nic_t * sp)
2800 {
2801         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2802         int ret = FAILURE, cnt = 0;
2803         u64 val64;
2804
2805         while (TRUE) {
2806                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2807                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2808                         ret = SUCCESS;
2809                         break;
2810                 }
2811                 msleep(50);
2812                 if (cnt++ > 10)
2813                         break;
2814         }
2815
2816         return ret;
2817 }
2818
2819 /**
2820  *  s2io_reset - Resets the card.
2821  *  @sp : private member of the device structure.
2822  *  Description: Function to Reset the card. This function then also
2823  *  restores the previously saved PCI configuration space registers as
2824  *  the card reset also resets the configuration space.
2825  *  Return value:
2826  *  void.
2827  */
2828
void s2io_reset(nic_t * sp)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;

        /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue a global software reset of the adapter. */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);

        /*
         * At this stage, if the PCI write is indeed completed, the
         * card is reset and so is the PCI Config space of the device.
         * So a read cannot be issued at this stage on any of the
         * registers to ensure the write into "sw_reset" register
         * has gone through.
         * Question: Is there any system call that will explicitly force
         * all the write commands still pending on the bus to be pushed
         * through?
         * As of now I am just giving a 250ms delay and hoping that the
         * PCI write to sw_reset register is done by this time.
         */
        msleep(250);

        /* Restore the PCI state saved during initialization. */
        pci_restore_state(sp->pdev);
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                                     pci_cmd);
        s2io_init_pci(sp);

        /* Give the card time to settle after the config-space restore. */
        msleep(250);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear parity err detect bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                /* Magic LED configuration word written to an undocumented
                 * register at BAR0 offset 0x2700. */
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occurred on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        sp->device_enabled_once = FALSE;
}
2906
2907 /**
 *  s2io_set_swapper - to set the swapper control on the card
2909  *  @sp : private member of the device structure,
2910  *  pointer to the s2io_nic structure.
2911  *  Description: Function to set the swapper control on the card
2912  *  correctly depending on the 'endianness' of the system.
2913  *  Return value:
2914  *  SUCCESS on success and FAILURE on failure.
2915  */
2916
int s2io_set_swapper(nic_t * sp)
{
        struct net_device *dev = sp->dev;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                /* Probe each candidate read-path swapper setting until the
                 * feedback register returns the known signature. */
                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                /* Remember the working read-path setting; it is OR-ed into
                 * the write-path probes below. */
                valr = value[i];
        } else {
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Now verify the write path by round-tripping a known pattern
         * through the XMSI address register. */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the verified FE/SE probe bits, then OR in the
         * per-path enables below. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

        /* NOTE(review): the two branches below differ only in that the
         * little-endian one additionally sets the TXD/RXD *_SE bits —
         * the common part could be shared, but is kept duplicated here. */
#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3044
3045 int wait_for_msix_trans(nic_t *nic, int i)
3046 {
3047         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3048         u64 val64;
3049         int ret = 0, cnt = 0;
3050
3051         do {
3052                 val64 = readq(&bar0->xmsi_access);
3053                 if (!(val64 & BIT(15)))
3054                         break;
3055                 mdelay(1);
3056                 cnt++;
3057         } while(cnt < 5);
3058         if (cnt == 5) {
3059                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3060                 ret = 1;
3061         }
3062
3063         return ret;
3064 }
3065
3066 void restore_xmsi_data(nic_t *nic)
3067 {
3068         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3069         u64 val64;
3070         int i;
3071
3072         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3073                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3074                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3075                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3076                 writeq(val64, &bar0->xmsi_access);
3077                 if (wait_for_msix_trans(nic, i)) {
3078                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3079                         continue;
3080                 }
3081         }
3082 }
3083
3084 void store_xmsi_data(nic_t *nic)
3085 {
3086         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3087         u64 val64, addr, data;
3088         int i;
3089
3090         /* Store and display */
3091         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3092                 val64 = (BIT(15) | vBIT(i, 26, 6));
3093                 writeq(val64, &bar0->xmsi_access);
3094                 if (wait_for_msix_trans(nic, i)) {
3095                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3096                         continue;
3097                 }
3098                 addr = readq(&bar0->xmsi_address);
3099                 data = readq(&bar0->xmsi_data);
3100                 if (addr && data) {
3101                         nic->msix_info[i].addr = addr;
3102                         nic->msix_info[i].data = data;
3103                 }
3104         }
3105 }
3106
3107 int s2io_enable_msi(nic_t *nic)
3108 {
3109         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3110         u16 msi_ctrl, msg_val;
3111         struct config_param *config = &nic->config;
3112         struct net_device *dev = nic->dev;
3113         u64 val64, tx_mat, rx_mat;
3114         int i, err;
3115
3116         val64 = readq(&bar0->pic_control);
3117         val64 &= ~BIT(1);
3118         writeq(val64, &bar0->pic_control);
3119
3120         err = pci_enable_msi(nic->pdev);
3121         if (err) {
3122                 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3123                           nic->dev->name);
3124                 return err;
3125         }
3126
3127         /*
3128          * Enable MSI and use MSI-1 in stead of the standard MSI-0
3129          * for interrupt handling.
3130          */
3131         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3132         msg_val ^= 0x1;
3133         pci_write_config_word(nic->pdev, 0x4c, msg_val);
3134         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3135
3136         pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3137         msi_ctrl |= 0x10;
3138         pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3139
3140         /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3141         tx_mat = readq(&bar0->tx_mat0_n[0]);
3142         for (i=0; i<config->tx_fifo_num; i++) {
3143                 tx_mat |= TX_MAT_SET(i, 1);
3144         }
3145         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3146
3147         rx_mat = readq(&bar0->rx_mat);
3148         for (i=0; i<config->rx_ring_num; i++) {
3149                 rx_mat |= RX_MAT_SET(i, 1);
3150         }
3151         writeq(rx_mat, &bar0->rx_mat);
3152
3153         dev->irq = nic->pdev->irq;
3154         return 0;
3155 }
3156
int s2io_enable_msi_x(nic_t *nic)
{
        XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
        u64 tx_mat, rx_mat;
        u16 msi_control; /* Temp variable */
        int ret, i, j, msix_indx = 1;

        /* Allocate the pci msix_entry table handed to pci_enable_msix(). */
        nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
                               GFP_KERNEL);
        if (nic->entries == NULL) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
                return -ENOMEM;
        }
        memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

        /* Parallel driver-private table mapping vectors to fifos/rings. */
        nic->s2io_entries =
                kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
                                   GFP_KERNEL);
        if (nic->s2io_entries == NULL) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
                kfree(nic->entries);
                return -ENOMEM;
        }
        memset(nic->s2io_entries, 0,
               MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

        for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
                nic->entries[i].entry = i;
                nic->s2io_entries[i].entry = i;
                nic->s2io_entries[i].arg = NULL;
                nic->s2io_entries[i].in_use = 0;
        }

        /* Vector 0 is reserved (msix_indx starts at 1); assign one vector
         * per Tx FIFO and route the FIFO interrupt to it. */
        tx_mat = readq(&bar0->tx_mat0_n[0]);
        for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
                tx_mat |= TX_MAT_SET(i, msix_indx);
                nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
                nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
                nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(tx_mat, &bar0->tx_mat0_n[0]);

        if (!nic->config.bimodal) {
                /* One vector per Rx ring via the rx_mat register. */
                rx_mat = readq(&bar0->rx_mat);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        rx_mat |= RX_MAT_SET(j, msix_indx);
                        nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(rx_mat, &bar0->rx_mat);
        } else {
                /* Bimodal: rings are steered through tx_mat0_n[7] instead.
                 * NOTE(review): TX_MAT_SET(i, ...) uses `i`, which is left
                 * over from the FIFO loop above (== tx_fifo_num here); the
                 * non-bimodal branch uses the loop variable `j`.  Looks
                 * suspicious — confirm intended field index against the
                 * hardware spec before changing. */
                tx_mat = readq(&bar0->tx_mat0_n[7]);
                for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
                        tx_mat |= TX_MAT_SET(i, msix_indx);
                        nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
                        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
                }
                writeq(tx_mat, &bar0->tx_mat0_n[7]);
        }

        /* NOTE(review): failure is reported as -ENOMEM even though
         * pci_enable_msix() can fail for other reasons (e.g. too few
         * vectors available); `ret` is discarded. */
        ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
                kfree(nic->entries);
                kfree(nic->s2io_entries);
                nic->entries = NULL;
                nic->s2io_entries = NULL;
                return -ENOMEM;
        }

        /*
         * To enable MSI-X, MSI also needs to be enabled, due to a bug
         * in the herc NIC. (Temp change, needs to be removed later)
         */
        pci_read_config_word(nic->pdev, 0x42, &msi_control);
        msi_control |= 0x1; /* Enable MSI */
        pci_write_config_word(nic->pdev, 0x42, msi_control);

        return 0;
}
3239
3240 /* ********************************************************* *
3241  * Functions defined below concern the OS part of the driver *
3242  * ********************************************************* */
3243
3244 /**
3245  *  s2io_open - open entry point of the driver
3246  *  @dev : pointer to the device structure.
3247  *  Description:
3248  *  This function is the open entry point of the driver. It mainly calls a
3249  *  function to allocate Rx buffers and inserts them into the buffer
3250  *  descriptors and then enables the Rx part of the NIC.
3251  *  Return value:
3252  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3253  *   file on failure.
3254  */
3255
3256 int s2io_open(struct net_device *dev)
3257 {
3258         nic_t *sp = dev->priv;
3259         int err = 0;
3260         int i;
3261         u16 msi_control; /* Temp variable */
3262
3263         /*
3264          * Make sure you have link off by default every time
3265          * Nic is initialized
3266          */
3267         netif_carrier_off(dev);
3268         sp->last_link_state = 0;
3269
3270         /* Initialize H/W and enable interrupts */
3271         if (s2io_card_up(sp)) {
3272                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3273                           dev->name);
3274                 err = -ENODEV;
3275                 goto hw_init_failed;
3276         }
3277
3278         /* Store the values of the MSIX table in the nic_t structure */
3279         store_xmsi_data(sp);
3280
3281         /* After proper initialization of H/W, register ISR */
3282         if (sp->intr_type == MSI) {
3283                 err = request_irq((int) sp->pdev->irq, s2io_msi_handle, 
3284                         SA_SHIRQ, sp->name, dev);
3285                 if (err) {
3286                         DBG_PRINT(ERR_DBG, "%s: MSI registration \
3287 failed\n", dev->name);
3288                         goto isr_registration_failed;
3289                 }
3290         }
3291         if (sp->intr_type == MSI_X) {
3292                 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3293                         if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3294                                 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3295                                         dev->name, i);
3296                                 err = request_irq(sp->entries[i].vector,
3297                                           s2io_msix_fifo_handle, 0, sp->desc1,
3298                                           sp->s2io_entries[i].arg);
3299                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, 
3300                                                         sp->msix_info[i].addr);
3301                         } else {
3302                                 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3303                                         dev->name, i);
3304                                 err = request_irq(sp->entries[i].vector,
3305                                           s2io_msix_ring_handle, 0, sp->desc2,
3306                                           sp->s2io_entries[i].arg);
3307                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, 
3308                                                         sp->msix_info[i].addr);
3309                         }
3310                         if (err) {
3311                                 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3312 failed\n", dev->name, i);
3313                                 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3314                                 goto isr_registration_failed;
3315                         }
3316                         sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3317                 }
3318         }
3319         if (sp->intr_type == INTA) {
3320                 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3321                                 sp->name, dev);
3322                 if (err) {
3323                         DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3324                                   dev->name);
3325                         goto isr_registration_failed;
3326                 }
3327         }
3328
3329         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3330                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3331                 err = -ENODEV;
3332                 goto setting_mac_address_failed;
3333         }
3334
3335         netif_start_queue(dev);
3336         return 0;
3337
3338 setting_mac_address_failed:
3339         if (sp->intr_type != MSI_X)
3340                 free_irq(sp->pdev->irq, dev);
3341 isr_registration_failed:
3342         del_timer_sync(&sp->alarm_timer);
3343         if (sp->intr_type == MSI_X) {
3344                 if (sp->device_type == XFRAME_II_DEVICE) {
3345                         for (i=1; (sp->s2io_entries[i].in_use == 
3346                                 MSIX_REGISTERED_SUCCESS); i++) {
3347                                 int vector = sp->entries[i].vector;
3348                                 void *arg = sp->s2io_entries[i].arg;
3349
3350                                 free_irq(vector, arg);
3351                         }
3352                         pci_disable_msix(sp->pdev);
3353
3354                         /* Temp */
3355                         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3356                         msi_control &= 0xFFFE; /* Disable MSI */
3357                         pci_write_config_word(sp->pdev, 0x42, msi_control);
3358                 }
3359         }
3360         else if (sp->intr_type == MSI)
3361                 pci_disable_msi(sp->pdev);
3362         s2io_reset(sp);
3363 hw_init_failed:
3364         if (sp->intr_type == MSI_X) {
3365                 if (sp->entries)
3366                         kfree(sp->entries);
3367                 if (sp->s2io_entries)
3368                         kfree(sp->s2io_entries);
3369         }
3370         return err;
3371 }
3372
3373 /**
3374  *  s2io_close -close entry point of the driver
3375  *  @dev : device pointer.
3376  *  Description:
3377  *  This is the stop entry point of the driver. It needs to undo exactly
3378  *  whatever was done by the open entry point,thus it's usually referred to
3379  *  as the close function.Among other things this function mainly stops the
3380  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3381  *  Return value:
3382  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3383  *  file on failure.
3384  */
3385
int s2io_close(struct net_device *dev)
{
        nic_t *sp = dev->priv;
        int i;
        u16 msi_control;

        /* Wait for any queued work items before tearing down. */
        flush_scheduled_work();
        netif_stop_queue(dev);
        /* Reset card, kill tasklet and free Tx and Rx buffers. */
        s2io_card_down(sp);

        if (sp->intr_type == MSI_X) {
                if (sp->device_type == XFRAME_II_DEVICE) {
                        /* Free every vector that s2io_open() registered
                         * (entry 0 is unused, hence i starts at 1). */
                        for (i=1; (sp->s2io_entries[i].in_use ==
                                        MSIX_REGISTERED_SUCCESS); i++) {
                                int vector = sp->entries[i].vector;
                                void *arg = sp->s2io_entries[i].arg;

                                free_irq(vector, arg);
                        }
                        /* Undo the herc-workaround MSI enable done in
                         * s2io_enable_msi_x(). */
                        pci_read_config_word(sp->pdev, 0x42, &msi_control);
                        msi_control &= 0xFFFE; /* Disable MSI */
                        pci_write_config_word(sp->pdev, 0x42, msi_control);

                        pci_disable_msix(sp->pdev);
                }
        }
        else {
                free_irq(sp->pdev->irq, dev);
                if (sp->intr_type == MSI)
                        pci_disable_msi(sp->pdev);
        }
        sp->device_close_flag = TRUE;   /* Device is shut down. */
        return 0;
}
3421
3422 /**
 *  s2io_xmit - Tx entry point of the driver
3424  *  @skb : the socket buffer containing the Tx data.
3425  *  @dev : device pointer.
3426  *  Description :
3427  *  This function is the Tx entry point of the driver. S2IO NIC supports
3428  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the packet, only the trans_start
 *  variable will not be updated.
3431  *  Return value:
3432  *  0 on success & 1 on failure.
3433  */
3434
int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        nic_t *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        TxD_t *txdp;
        TxFIFO_element_t __iomem *tx_fifo;
        unsigned long flags;
#ifdef NETIF_F_TSO
        int mss;
#endif
        u16 vlan_tag = 0;
        int vlan_priority = 0;
        mac_info_t *mac_control;
        struct config_param *config;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
        /* The tx_lock is held for the whole descriptor fill + doorbell. */
        spin_lock_irqsave(&sp->tx_lock, flags);
        if (atomic_read(&sp->card_state) == CARD_DOWN) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;

        /* Get Fifo number to Transmit based on vlan priority */
        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_priority = vlan_tag >> 13;
                queue = config->fifo_mapping[vlan_priority];
        }

        /* Grab the descriptor at the FIFO's current put position. */
        put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
        get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
        txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
                list_virt_addr;

        queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                netif_stop_queue(dev);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                return 0;
        }

        /* A buffer with no data will be dropped */
        if (!skb->len) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                return 0;
        }

#ifdef NETIF_F_TSO
        /* Enable hardware LSO when the stack supplied an MSS. */
        mss = skb_shinfo(skb)->tso_size;
        if (mss) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
        }
#endif

        frg_cnt = skb_shinfo(skb)->nr_frags;
        frg_len = skb->len - skb->data_len;

        /* Map the linear part of the skb into the first descriptor. */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        txdp->Host_Control = (unsigned long) skb;
        if (skb->ip_summed == CHECKSUM_HW) {
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }

        txdp->Control_2 |= config->tx_intr_type;

        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
                            TXD_GATHER_CODE_FIRST);
        txdp->Control_1 |= TXD_LIST_OWN_XENA;

        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        /* Ring the doorbell: hand the descriptor list to the FIFO. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);

#ifdef NETIF_F_TSO
        if (mss)
                val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
        writeq(val64, &tx_fifo->List_Control);

        /* Order the MMIO writes before releasing the lock. */
        mmiowb();

        put_off++;
        put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
        mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off + 1) % queue_len) == get_off) {
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                netif_stop_queue(dev);
        }

        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
}
3573
3574 static void
3575 s2io_alarm_handle(unsigned long data)
3576 {
3577         nic_t *sp = (nic_t *)data;
3578
3579         alarm_intr_handler(sp);
3580         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3581 }
3582
/*
 * s2io_msi_handle - interrupt service routine used in MSI mode.
 * @irq: interrupt number (unused).
 * @dev_id: the net_device passed to request_irq().
 * @regs: CPU register snapshot at interrupt time (unused).
 *
 * MSI delivers a single vector, so every invocation services all Rx
 * rings and all Tx FIFOs, then tops up Rx buffers on rings that have
 * drained below their watermarks.  Always returns IRQ_HANDLED.
 */
static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	int i;
	int ret;
	mac_info_t *mac_control;
	struct config_param *config;

	/* isr_cnt tracks in-flight ISRs so other paths can wait for them */
	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;
	DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);

	/* If Intr is because of Rx Traffic */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* If Intr is because of Tx Traffic */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
		int level = rx_buffer_level(sp, rxb_size, i);

		/* NOTE(review): TASKLET_IN_USE appears to test-and-set bit 0
		 * of sp->tasklet_status; the clear_bit() calls below release
		 * it — confirm against the macro's definition. */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s:Out of memory",
					  dev->name);
				DBG_PRINT(ERR_DBG, " in ISR!!\n");
				/* release the tasklet bit and the ISR
				 * refcount before bailing out */
				clear_bit(0, (&sp->tasklet_status));
				atomic_dec(&sp->isr_cnt);
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			/* Not critical yet: defer the refill to the tasklet */
			tasklet_schedule(&sp->task);
		}
	}

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
3635
3636 static irqreturn_t
3637 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3638 {
3639         ring_info_t *ring = (ring_info_t *)dev_id;
3640         nic_t *sp = ring->nic;
3641         int rxb_size, level, rng_n;
3642
3643         atomic_inc(&sp->isr_cnt);
3644         rx_intr_handler(ring);
3645
3646         rng_n = ring->ring_no;
3647         rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3648         level = rx_buffer_level(sp, rxb_size, rng_n);
3649
3650         if ((level == PANIC) && (!TASKLET_IN_USE)) {
3651                 int ret;
3652                 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3653                 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3654                 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3655                         DBG_PRINT(ERR_DBG, "Out of memory in %s",
3656                                   __FUNCTION__);
3657                         clear_bit(0, (&sp->tasklet_status));
3658                         return IRQ_HANDLED;
3659                 }
3660                 clear_bit(0, (&sp->tasklet_status));
3661         } else if (level == LOW) {
3662                 tasklet_schedule(&sp->task);
3663         }
3664         atomic_dec(&sp->isr_cnt);
3665
3666         return IRQ_HANDLED;
3667 }
3668
3669 static irqreturn_t
3670 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3671 {
3672         fifo_info_t *fifo = (fifo_info_t *)dev_id;
3673         nic_t *sp = fifo->nic;
3674
3675         atomic_inc(&sp->isr_cnt);
3676         tx_intr_handler(fifo);
3677         atomic_dec(&sp->isr_cnt);
3678         return IRQ_HANDLED;
3679 }
3680
/*
 * s2io_txpic_intr_handle - service TxPIC (GPIO / link state) interrupts.
 * @sp: device private structure.
 *
 * On a GPIO cause, reads gpio_int_reg for link up/down events, calls
 * s2io_set_link() on a genuine state change, and re-programs
 * gpio_int_mask so that only the interrupt for the *opposite* of the
 * current link state remains unmasked (link up -> watch for down, and
 * vice versa).
 */
static void s2io_txpic_intr_handle(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/* Both edges latched: write the bits back
			 * (presumably write-1-to-clear — confirm against
			 * the Xframe register spec) and just refresh the
			 * masks below. */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			goto masking;
		}

		/* A single edge that contradicts the cached link state:
		 * mask both link interrupts and process the transition. */
		if (((sp->last_link_state == LINK_UP) &&
			(val64 & GPIO_INT_REG_LINK_DOWN)) ||
		((sp->last_link_state == LINK_DOWN) &&
		(val64 & GPIO_INT_REG_LINK_UP))) {
			val64 = readq(&bar0->gpio_int_mask);
			val64 |=  GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);
			s2io_set_link((unsigned long)sp);
		}
masking:
		if (sp->last_link_state == LINK_UP) {
			/*enable down interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			/* unmasks link down intr */
			val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
			/* masks link up intr */
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);
		} else {
			/*enable UP Interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			/* unmasks link up interrupt */
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			/* masks link down interrupt */
			val64 |=  GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);
		}
	}
}
3727
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  @pt_regs: pointer to the registers pushed on the stack.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0, val64;
	mac_info_t *mac_control;
	struct config_param *config;

	/* isr_cnt tracks in-flight ISRs so other paths can wait for them */
	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by Xena. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

#ifdef CONFIG_S2IO_NAPI
	if (reason & GEN_INTR_RXTRAFFIC) {
		/* NAPI: mask Rx interrupts and let the poll routine do Rx */
		if (netif_rx_schedule_prep(dev)) {
			en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
					      DISABLE_INTRS);
			__netif_rx_schedule(dev);
		}
	}
#else
	/* If Intr is because of Rx Traffic */
	if (reason & GEN_INTR_RXTRAFFIC) {
		/*
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		val64 = 0xFFFFFFFFFFFFFFFFULL;
		writeq(val64, &bar0->rx_traffic_int);
		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}
#endif

	/* If Intr is because of Tx Traffic */
	if (reason & GEN_INTR_TXTRAFFIC) {
		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		val64 = 0xFFFFFFFFFFFFFFFFULL;
		writeq(val64, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);
	}

	/* GPIO / link state change causes */
	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
#ifndef CONFIG_S2IO_NAPI
	for (i = 0; i < config->rx_ring_num; i++) {
		int ret;
		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
		int level = rx_buffer_level(sp, rxb_size, i);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s:Out of memory",
					  dev->name);
				DBG_PRINT(ERR_DBG, " in ISR!!\n");
				/* release the tasklet bit and the ISR
				 * refcount before bailing out */
				clear_bit(0, (&sp->tasklet_status));
				atomic_dec(&sp->isr_cnt);
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			/* Not critical yet: defer refill to the tasklet */
			tasklet_schedule(&sp->task);
		}
	}
#endif

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
3844
3845 /**
3846  * s2io_updt_stats -
3847  */
3848 static void s2io_updt_stats(nic_t *sp)
3849 {
3850         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3851         u64 val64;
3852         int cnt = 0;
3853
3854         if (atomic_read(&sp->card_state) == CARD_UP) {
3855                 /* Apprx 30us on a 133 MHz bus */
3856                 val64 = SET_UPDT_CLICKS(10) |
3857                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3858                 writeq(val64, &bar0->stat_cfg);
3859                 do {
3860                         udelay(100);
3861                         val64 = readq(&bar0->stat_cfg);
3862                         if (!(val64 & BIT(0)))
3863                                 break;
3864                         cnt++;
3865                         if (cnt == 5)
3866                                 break; /* Updt failed */
3867                 } while(1);
3868         }
3869 }
3870
3871 /**
3872  *  s2io_get_stats - Updates the device statistics structure.
3873  *  @dev : pointer to the device structure.
3874  *  Description:
3875  *  This function updates the device statistics structure in the s2io_nic
3876  *  structure and returns a pointer to the same.
3877  *  Return value:
3878  *  pointer to the updated net_device_stats structure.
3879  */
3880
3881 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3882 {
3883         nic_t *sp = dev->priv;
3884         mac_info_t *mac_control;
3885         struct config_param *config;
3886
3887
3888         mac_control = &sp->mac_control;
3889         config = &sp->config;
3890
3891         /* Configure Stats for immediate updt */
3892         s2io_updt_stats(sp);
3893
3894         sp->stats.tx_packets =
3895                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3896         sp->stats.tx_errors =
3897                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3898         sp->stats.rx_errors =
3899                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3900         sp->stats.multicast =
3901                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3902         sp->stats.rx_length_errors =
3903                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3904
3905         return (&sp->stats);
3906 }
3907
3908 /**
3909  *  s2io_set_multicast - entry point for multicast address enable/disable.
3910  *  @dev : pointer to the device structure
3911  *  Description:
3912  *  This function is a driver entry point which gets called by the kernel
3913  *  whenever multicast addresses must be enabled/disabled. This also gets
3914  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
3915  *  determine, if multicast address must be enabled or if promiscuous mode
3916  *  is to be disabled etc.
3917  *  Return value:
3918  *  void.
3919  */
3920
3921 static void s2io_set_multicast(struct net_device *dev)
3922 {
3923         int i, j, prev_cnt;
3924         struct dev_mc_list *mclist;
3925         nic_t *sp = dev->priv;
3926         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3927         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3928             0xfeffffffffffULL;
3929         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3930         void __iomem *add;
3931
3932         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3933                 /*  Enable all Multicast addresses */
3934                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3935                        &bar0->rmac_addr_data0_mem);
3936                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3937                        &bar0->rmac_addr_data1_mem);
3938                 val64 = RMAC_ADDR_CMD_MEM_WE |
3939                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3940                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3941                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3942                 /* Wait till command completes */
3943                 wait_for_cmd_complete(sp);
3944
3945                 sp->m_cast_flg = 1;
3946                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3947         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3948                 /*  Disable all Multicast addresses */
3949                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3950                        &bar0->rmac_addr_data0_mem);
3951                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3952                        &bar0->rmac_addr_data1_mem);
3953                 val64 = RMAC_ADDR_CMD_MEM_WE |
3954                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3955                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3956                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3957                 /* Wait till command completes */
3958                 wait_for_cmd_complete(sp);
3959
3960                 sp->m_cast_flg = 0;
3961                 sp->all_multi_pos = 0;
3962         }
3963
3964         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3965                 /*  Put the NIC into promiscuous mode */
3966                 add = &bar0->mac_cfg;
3967                 val64 = readq(&bar0->mac_cfg);
3968                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3969
3970                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3971                 writel((u32) val64, add);
3972                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3973                 writel((u32) (val64 >> 32), (add + 4));
3974
3975                 val64 = readq(&bar0->mac_cfg);
3976                 sp->promisc_flg = 1;
3977                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3978                           dev->name);
3979         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3980                 /*  Remove the NIC from promiscuous mode */
3981                 add = &bar0->mac_cfg;
3982                 val64 = readq(&bar0->mac_cfg);
3983                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3984
3985                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3986                 writel((u32) val64, add);
3987                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3988                 writel((u32) (val64 >> 32), (add + 4));
3989
3990                 val64 = readq(&bar0->mac_cfg);
3991                 sp->promisc_flg = 0;
3992                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3993                           dev->name);
3994         }
3995
3996         /*  Update individual M_CAST address list */
3997         if ((!sp->m_cast_flg) && dev->mc_count) {
3998                 if (dev->mc_count >
3999                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4000                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4001                                   dev->name);
4002                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4003                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4004                         return;
4005                 }
4006
4007                 prev_cnt = sp->mc_addr_count;
4008                 sp->mc_addr_count = dev->mc_count;
4009
4010                 /* Clear out the previous list of Mc in the H/W. */
4011                 for (i = 0; i < prev_cnt; i++) {
4012                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4013                                &bar0->rmac_addr_data0_mem);
4014                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4015                                 &bar0->rmac_addr_data1_mem);
4016                         val64 = RMAC_ADDR_CMD_MEM_WE |
4017                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4018                             RMAC_ADDR_CMD_MEM_OFFSET
4019                             (MAC_MC_ADDR_START_OFFSET + i);
4020                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4021
4022                         /* Wait for command completes */
4023                         if (wait_for_cmd_complete(sp)) {
4024                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4025                                           dev->name);
4026                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4027                                 return;
4028                         }
4029                 }
4030
4031                 /* Create the new Rx filter list and update the same in H/W. */
4032                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4033                      i++, mclist = mclist->next) {
4034                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4035                                ETH_ALEN);
4036                         for (j = 0; j < ETH_ALEN; j++) {
4037                                 mac_addr |= mclist->dmi_addr[j];
4038                                 mac_addr <<= 8;
4039                         }
4040                         mac_addr >>= 8;
4041                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4042                                &bar0->rmac_addr_data0_mem);
4043                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4044                                 &bar0->rmac_addr_data1_mem);
4045                         val64 = RMAC_ADDR_CMD_MEM_WE |
4046                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4047                             RMAC_ADDR_CMD_MEM_OFFSET
4048                             (i + MAC_MC_ADDR_START_OFFSET);
4049                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4050
4051                         /* Wait for command completes */
4052                         if (wait_for_cmd_complete(sp)) {
4053                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4054                                           dev->name);
4055                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4056                                 return;
4057                         }
4058                 }
4059         }
4060 }
4061
4062 /**
4063  *  s2io_set_mac_addr - Programs the Xframe mac address
4064  *  @dev : pointer to the device structure.
4065  *  @addr: a uchar pointer to the new mac address which is to be set.
4066  *  Description : This procedure will program the Xframe to receive
4067  *  frames with new Mac Address
4068  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4069  *  as defined in errno.h file on failure.
4070  */
4071
4072 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4073 {
4074         nic_t *sp = dev->priv;
4075         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4076         register u64 val64, mac_addr = 0;
4077         int i;
4078
4079         /*
4080          * Set the new MAC address as the new unicast filter and reflect this
4081          * change on the device address registered with the OS. It will be
4082          * at offset 0.
4083          */
4084         for (i = 0; i < ETH_ALEN; i++) {
4085                 mac_addr <<= 8;
4086                 mac_addr |= addr[i];
4087         }
4088
4089         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4090                &bar0->rmac_addr_data0_mem);
4091
4092         val64 =
4093             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4094             RMAC_ADDR_CMD_MEM_OFFSET(0);
4095         writeq(val64, &bar0->rmac_addr_cmd_mem);
4096         /* Wait till command completes */
4097         if (wait_for_cmd_complete(sp)) {
4098                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4099                 return FAILURE;
4100         }
4101
4102         return SUCCESS;
4103 }
4104
4105 /**
4106  * s2io_ethtool_sset - Sets different link parameters.
4107  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4108  * @info: pointer to the structure with parameters given by ethtool to set
4109  * link information.
4110  * Description:
4111  * The function sets different link parameters provided by the user onto
4112  * the NIC.
4113  * Return value:
4114  * 0 on success.
4115 */
4116
4117 static int s2io_ethtool_sset(struct net_device *dev,
4118                              struct ethtool_cmd *info)
4119 {
4120         nic_t *sp = dev->priv;
4121         if ((info->autoneg == AUTONEG_ENABLE) ||
4122             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4123                 return -EINVAL;
4124         else {
4125                 s2io_close(sp->dev);
4126                 s2io_open(sp->dev);
4127         }
4128
4129         return 0;
4130 }
4131
4132 /**
4133  * s2io_ethtol_gset - Return link specific information.
4134  * @sp : private member of the device structure, pointer to the
4135  *      s2io_nic structure.
4136  * @info : pointer to the structure with parameters given by ethtool
4137  * to return link information.
4138  * Description:
4139  * Returns link specific information like speed, duplex etc.. to ethtool.
4140  * Return value :
4141  * return 0 on success.
4142  */
4143
4144 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4145 {
4146         nic_t *sp = dev->priv;
4147         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4148         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4149         info->port = PORT_FIBRE;
4150         /* info->transceiver?? TODO */
4151
4152         if (netif_carrier_ok(sp->dev)) {
4153                 info->speed = 10000;
4154                 info->duplex = DUPLEX_FULL;
4155         } else {
4156                 info->speed = -1;
4157                 info->duplex = -1;
4158         }
4159
4160         info->autoneg = AUTONEG_DISABLE;
4161         return 0;
4162 }
4163
4164 /**
4165  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4166  * @sp : private member of the device structure, which is a pointer to the
4167  * s2io_nic structure.
4168  * @info : pointer to the structure with parameters given by ethtool to
4169  * return driver information.
4170  * Description:
4171  * Returns driver specefic information like name, version etc.. to ethtool.
4172  * Return value:
4173  *  void
4174  */
4175
4176 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4177                                   struct ethtool_drvinfo *info)
4178 {
4179         nic_t *sp = dev->priv;
4180
4181         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4182         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4183         strncpy(info->fw_version, "", sizeof(info->fw_version));
4184         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4185         info->regdump_len = XENA_REG_SPACE;
4186         info->eedump_len = XENA_EEPROM_SPACE;
4187         info->testinfo_len = S2IO_TEST_LEN;
4188         info->n_stats = S2IO_STAT_LEN;
4189 }
4190
4191 /**
4192  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4193  *  @sp: private member of the device structure, which is a pointer to the
4194  *  s2io_nic structure.
4195  *  @regs : pointer to the structure with parameters given by ethtool for
4196  *  dumping the registers.
4197  *  @reg_space: The input argumnet into which all the registers are dumped.
4198  *  Description:
4199  *  Dumps the entire register space of xFrame NIC into the user given
4200  *  buffer area.
4201  * Return value :
4202  * void .
4203 */
4204
4205 static void s2io_ethtool_gregs(struct net_device *dev,
4206                                struct ethtool_regs *regs, void *space)
4207 {
4208         int i;
4209         u64 reg;
4210         u8 *reg_space = (u8 *) space;
4211         nic_t *sp = dev->priv;
4212
4213         regs->len = XENA_REG_SPACE;
4214         regs->version = sp->pdev->subsystem_device;
4215
4216         for (i = 0; i < regs->len; i += 8) {
4217                 reg = readq(sp->bar0 + i);
4218                 memcpy((reg_space + i), &reg, 8);
4219         }
4220 }
4221
4222 /**
4223  *  s2io_phy_id  - timer function that alternates adapter LED.
4224  *  @data : address of the private member of the device structure, which
4225  *  is a pointer to the s2io_nic structure, provided as an u32.
4226  * Description: This is actually the timer function that alternates the
4227  * adapter LED bit of the adapter control bit to set/reset every time on
4228  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4229  *  once every second.
4230 */
4231 static void s2io_phy_id(unsigned long data)
4232 {
4233         nic_t *sp = (nic_t *) data;
4234         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4235         u64 val64 = 0;
4236         u16 subid;
4237
4238         subid = sp->pdev->subsystem_device;
4239         if ((sp->device_type == XFRAME_II_DEVICE) ||
4240                    ((subid & 0xFF) >= 0x07)) {
4241                 val64 = readq(&bar0->gpio_control);
4242                 val64 ^= GPIO_CTRL_GPIO_0;
4243                 writeq(val64, &bar0->gpio_control);
4244         } else {
4245                 val64 = readq(&bar0->adapter_control);
4246                 val64 ^= ADAPTER_LED_ON;
4247                 writeq(val64, &bar0->adapter_control);
4248         }
4249
4250         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4251 }
4252
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev: pointer to the device structure.
 * @data : blink duration in seconds; 0 selects MAX_FLICKER_TIME.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success, -EFAULT if the adapter is down on
 * older Xframe-I cards.
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Save gpio_control so it can be restored after blinking */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Older Xframe-I cards can only blink via adapter_control,
	 * which requires the adapter to be enabled (link up) */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep for the requested duration while s2io_phy_id() blinks */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
4305
4306 /**
4307  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4308  * @sp : private member of the device structure, which is a pointer to the
4309  *      s2io_nic structure.
4310  * @ep : pointer to the structure with pause parameters given by ethtool.
4311  * Description:
4312  * Returns the Pause frame generation and reception capability of the NIC.
4313  * Return value:
4314  *  void
4315  */
4316 static void s2io_ethtool_getpause_data(struct net_device *dev,
4317                                        struct ethtool_pauseparam *ep)
4318 {
4319         u64 val64;
4320         nic_t *sp = dev->priv;
4321         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4322
4323         val64 = readq(&bar0->rmac_pause_cfg);
4324         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4325                 ep->tx_pause = TRUE;
4326         if (val64 & RMAC_PAUSE_RX_ENABLE)
4327                 ep->rx_pause = TRUE;
4328         ep->autoneg = FALSE;
4329 }
4330
4331 /**
4332  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4333  * @sp : private member of the device structure, which is a pointer to the
4334  *      s2io_nic structure.
4335  * @ep : pointer to the structure with pause parameters given by ethtool.
4336  * Description:
4337  * It can be used to set or reset Pause frame generation or reception
4338  * support of the NIC.
4339  * Return value:
4340  * int, returns 0 on Success
4341  */
4342
4343 static int s2io_ethtool_setpause_data(struct net_device *dev,
4344                                struct ethtool_pauseparam *ep)
4345 {
4346         u64 val64;
4347         nic_t *sp = dev->priv;
4348         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4349
4350         val64 = readq(&bar0->rmac_pause_cfg);
4351         if (ep->tx_pause)
4352                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4353         else
4354                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4355         if (ep->rx_pause)
4356                 val64 |= RMAC_PAUSE_RX_ENABLE;
4357         else
4358                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4359         writeq(val64, &bar0->rmac_pause_cfg);
4360         return 0;
4361 }
4362
4363 /**
4364  * read_eeprom - reads 4 bytes of data from user given offset.
4365  * @sp : private member of the device structure, which is a pointer to the
4366  *      s2io_nic structure.
4367  * @off : offset at which the data must be written
4368  * @data : Its an output parameter where the data read at the given
4369  *      offset is stored.
4370  * Description:
4371  * Will read 4 bytes of data from the user given offset and return the
4372  * read data.
4373  * NOTE: Will allow to read only part of the EEPROM visible through the
4374  *   I2C bus.
4375  * Return value:
4376  *  -1 on failure and 0 on success.
4377  */
4378
#define S2IO_DEV_ID             5
static int read_eeprom(nic_t * sp, int off, u64 * data)
{
        int ret = -1;
        u32 exit_cnt = 0;
        u64 val64;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;

        /* Xframe I: the EEPROM is reached through the I2C controller.
         * Start a 4-byte read transaction and poll for completion. */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll up to 5 times, 50ms apart, for transaction end. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                *data = I2C_CONTROL_GET_DATA(val64);
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: the EEPROM is reached through the SPI controller.
         * Program the command, then raise the request bit separately. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(0x3) | 
                        SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                /* NOTE(review): returns 1 (not -1) on NACK;
                                 * callers only test for non-zero, so this
                                 * still reads as failure. */
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                *data = readq(&bar0->spi_data);
                                *data &= 0xffffff;      /* only 3 bytes valid */
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
4429
4430 /**
4431  *  write_eeprom - actually writes the relevant part of the data value.
4432  *  @sp : private member of the device structure, which is a pointer to the
4433  *       s2io_nic structure.
4434  *  @off : offset at which the data must be written
4435  *  @data : The data that is to be written
4436  *  @cnt : Number of bytes of the data that are actually to be written into
4437  *  the Eeprom. (max of 3)
4438  * Description:
4439  *  Actually writes the relevant part of the data value into the Eeprom
4440  *  through the I2C bus.
4441  * Return value:
4442  *  0 on success, -1 on failure.
4443  */
4444
static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
        int exit_cnt = 0, ret = -1;
        u64 val64;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;

        /* Xframe I: write through the I2C controller and poll for the
         * end of the transaction (5 tries, 50ms apart). */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                /* Success only when the device did not NACK. */
                                if (!(val64 & I2C_CONTROL_NACK))
                                        ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: write through the SPI controller. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* A byte count of 8 is presumably encoded as 0 in the
                 * SPI_CONTROL_BYTECNT field -- TODO confirm against the
                 * Xframe II hardware manual. */
                int write_cnt = (cnt == 8) ? 0 : cnt;
                writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(write_cnt) | 
                        SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                /* NOTE(review): 1 (not -1) on NACK; callers
                                 * only check for non-zero. */
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
4494
4495 /**
4496  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
4498  *  @eeprom : pointer to the user level structure provided by ethtool,
4499  *  containing all relevant information.
4500  *  @data_buf : user defined value to be written into Eeprom.
4501  *  Description: Reads the values stored in the Eeprom at given offset
4502  *  for a given length. Stores these values int the input argument data
4503  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4504  *  Return value:
4505  *  int  0 on success
4506  */
4507
4508 static int s2io_ethtool_geeprom(struct net_device *dev,
4509                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4510 {
4511         u32 i, valid;
4512         u64 data;
4513         nic_t *sp = dev->priv;
4514
4515         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4516
4517         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4518                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4519
4520         for (i = 0; i < eeprom->len; i += 4) {
4521                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4522                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4523                         return -EFAULT;
4524                 }
4525                 valid = INV(data);
4526                 memcpy((data_buf + i), &valid, 4);
4527         }
4528         return 0;
4529 }
4530
4531 /**
4532  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4533  *  @sp : private member of the device structure, which is a pointer to the
4534  *  s2io_nic structure.
4535  *  @eeprom : pointer to the user level structure provided by ethtool,
4536  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
4538  *  Description:
4539  *  Tries to write the user provided value in the Eeprom, at the offset
4540  *  given by the user.
4541  *  Return value:
4542  *  0 on success, -EFAULT on failure.
4543  */
4544
static int s2io_ethtool_seeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom,
                                u8 * data_buf)
{
        int len = eeprom->len, cnt = 0;
        u64 valid = 0, data;
        nic_t *sp = dev->priv;

        /* Refuse the write unless userland echoed back the magic value
         * (vendor | device << 16) reported by the geeprom path. */
        if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
                DBG_PRINT(ERR_DBG,
                          "ETHTOOL_WRITE_EEPROM Err: Magic value ");
                DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
                          eeprom->magic);
                return -EFAULT;
        }

        /* Write the buffer one byte per transaction. */
        while (len) {
                data = (u32) data_buf[cnt] & 0x000000FF;
                /* Non-zero bytes are packed into bits 31:24 before the
                 * write; zero is passed through unshifted.  NOTE(review):
                 * the reason for this asymmetric packing is not visible
                 * here -- presumably it matches the controller's data
                 * lane for a 1-byte write; verify against write_eeprom()
                 * and the hardware manual. */
                if (data) {
                        valid = (u32) (data << 24);
                } else
                        valid = data;

                /* NOTE(review): cnt argument of 0 is passed as the byte
                 * count -- confirm this requests a single-byte write. */
                if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
                        DBG_PRINT(ERR_DBG,
                                  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
                        DBG_PRINT(ERR_DBG,
                                  "write into the specified offset\n");
                        return -EFAULT;
                }
                cnt++;
                len--;
        }

        return 0;
}
4581
4582 /**
4583  * s2io_register_test - reads and writes into all clock domains.
4584  * @sp : private member of the device structure, which is a pointer to the
4585  * s2io_nic structure.
4586  * @data : variable that returns the result of each of the test conducted b
4587  * by the driver.
4588  * Description:
4589  * Read and write into all clock domains. The NIC has 3 clock domains,
4590  * see that registers in all the three regions are accessible.
4591  * Return value:
4592  * 0 on success.
4593  */
4594
static int s2io_register_test(nic_t * sp, uint64_t * data)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64 = 0, exp_val;
        int fail = 0;

        /* Level 1: the swapper feedback register must read back its
         * fixed signature once byte-swapping is configured. */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x123456789abcdefULL) {
                fail = 1;
                DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
        }

        /* Level 2: RMAC pause config reset value. */
        val64 = readq(&bar0->rmac_pause_cfg);
        if (val64 != 0xc000ffff00000000ULL) {
                fail = 1;
                DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
        }

        /* Level 3: RX queue config reset value differs per chip. */
        val64 = readq(&bar0->rx_queue_cfg);
        if (sp->device_type == XFRAME_II_DEVICE)
                exp_val = 0x0404040404040404ULL;
        else
                exp_val = 0x0808080808080808ULL;
        if (val64 != exp_val) {
                fail = 1;
                DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
        }

        /* Level 4: XGXS elastic FIFO config reset value. */
        val64 = readq(&bar0->xgxs_efifo_cfg);
        if (val64 != 0x000000001923141EULL) {
                fail = 1;
                DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
        }

        /* Write tests: two complementary patterns through xmsi_data
         * must read back unchanged. */
        val64 = 0x5A5A5A5A5A5A5A5AULL;
        writeq(val64, &bar0->xmsi_data);
        val64 = readq(&bar0->xmsi_data);
        if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
                fail = 1;
                DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
        }

        val64 = 0xA5A5A5A5A5A5A5A5ULL;
        writeq(val64, &bar0->xmsi_data);
        val64 = readq(&bar0->xmsi_data);
        if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
                fail = 1;
                DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
        }

        /* 0 == pass, 1 == fail; also returned to the caller. */
        *data = fail;
        return fail;
}
4648
4649 /**
4650  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4651  * @sp : private member of the device structure, which is a pointer to the
4652  * s2io_nic structure.
4653  * @data:variable that returns the result of each of the test conducted by
4654  * the driver.
4655  * Description:
4656  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4657  * register.
4658  * Return value:
4659  * 0 on success.
4660  */
4661
4662 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4663 {
4664         int fail = 0;
4665         u64 ret_data, org_4F0, org_7F0;
4666         u8 saved_4F0 = 0, saved_7F0 = 0;
4667         struct net_device *dev = sp->dev;
4668
4669         /* Test Write Error at offset 0 */
4670         /* Note that SPI interface allows write access to all areas
4671          * of EEPROM. Hence doing all negative testing only for Xframe I.
4672          */
4673         if (sp->device_type == XFRAME_I_DEVICE)
4674                 if (!write_eeprom(sp, 0, 0, 3))
4675                         fail = 1;
4676
4677         /* Save current values at offsets 0x4F0 and 0x7F0 */
4678         if (!read_eeprom(sp, 0x4F0, &org_4F0))
4679                 saved_4F0 = 1;
4680         if (!read_eeprom(sp, 0x7F0, &org_7F0))
4681                 saved_7F0 = 1;
4682
4683         /* Test Write at offset 4f0 */
4684         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
4685                 fail = 1;
4686         if (read_eeprom(sp, 0x4F0, &ret_data))
4687                 fail = 1;
4688
4689         if (ret_data != 0x012345) {
4690                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 
4691                 fail = 1;
4692         }
4693
4694         /* Reset the EEPROM data go FFFF */
4695         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
4696
4697         /* Test Write Request Error at offset 0x7c */
4698         if (sp->device_type == XFRAME_I_DEVICE)
4699                 if (!write_eeprom(sp, 0x07C, 0, 3))
4700                         fail = 1;
4701
4702         /* Test Write Request at offset 0x7f0 */
4703         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
4704                 fail = 1;
4705         if (read_eeprom(sp, 0x7F0, &ret_data))
4706                 fail = 1;
4707
4708         if (ret_data != 0x012345) {
4709                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); 
4710                 fail = 1;
4711         }
4712
4713         /* Reset the EEPROM data go FFFF */
4714         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
4715
4716         if (sp->device_type == XFRAME_I_DEVICE) {
4717                 /* Test Write Error at offset 0x80 */
4718                 if (!write_eeprom(sp, 0x080, 0, 3))
4719                         fail = 1;
4720
4721                 /* Test Write Error at offset 0xfc */
4722                 if (!write_eeprom(sp, 0x0FC, 0, 3))
4723                         fail = 1;
4724
4725                 /* Test Write Error at offset 0x100 */
4726                 if (!write_eeprom(sp, 0x100, 0, 3))
4727                         fail = 1;
4728
4729                 /* Test Write Error at offset 4ec */
4730                 if (!write_eeprom(sp, 0x4EC, 0, 3))
4731                         fail = 1;
4732         }
4733
4734         /* Restore values at offsets 0x4F0 and 0x7F0 */
4735         if (saved_4F0)
4736                 write_eeprom(sp, 0x4F0, org_4F0, 3);
4737         if (saved_7F0)
4738                 write_eeprom(sp, 0x7F0, org_7F0, 3);
4739
4740         *data = fail;
4741         return fail;
4742 }
4743
4744 /**
4745  * s2io_bist_test - invokes the MemBist test of the card .
4746  * @sp : private member of the device structure, which is a pointer to the
4747  * s2io_nic structure.
4748  * @data:variable that returns the result of each of the test conducted by
4749  * the driver.
4750  * Description:
4751  * This invokes the MemBist test of the card. We give around
4752  * 2 secs time for the Test to complete. If it's still not complete
4753  * within this peiod, we consider that the test failed.
4754  * Return value:
4755  * 0 on success and -1 on failure.
4756  */
4757
4758 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4759 {
4760         u8 bist = 0;
4761         int cnt = 0, ret = -1;
4762
4763         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4764         bist |= PCI_BIST_START;
4765         pci_write_config_word(sp->pdev, PCI_BIST, bist);
4766
4767         while (cnt < 20) {
4768                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4769                 if (!(bist & PCI_BIST_START)) {
4770                         *data = (bist & PCI_BIST_CODE_MASK);
4771                         ret = 0;
4772                         break;
4773                 }
4774                 msleep(100);
4775                 cnt++;
4776         }
4777
4778         return ret;
4779 }
4780
4781 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
4784  * s2io_nic structure.
4785  * @data: variable that returns the result of each of the test conducted by
4786  * the driver.
4787  * Description:
4788  * The function verifies the link state of the NIC and updates the input
4789  * argument 'data' appropriately.
4790  * Return value:
4791  * 0 on success.
4792  */
4793
4794 static int s2io_link_test(nic_t * sp, uint64_t * data)
4795 {
4796         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4797         u64 val64;
4798
4799         val64 = readq(&bar0->adapter_status);
4800         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4801                 *data = 1;
4802
4803         return 0;
4804 }
4805
4806 /**
4807  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4808  * @sp - private member of the device structure, which is a pointer to the
4809  * s2io_nic structure.
4810  * @data - variable that returns the result of each of the test
4811  * conducted by the driver.
4812  * Description:
4813  *  This is one of the offline test that tests the read and write
4814  *  access to the RldRam chip on the NIC.
4815  * Return value:
4816  *  0 on success.
4817  */
4818
static int s2io_rldram_test(nic_t * sp, uint64_t * data)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_fail = 0;

        /* Disable ECC so test patterns are stored verbatim. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the RLDRAM controller into test mode. */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        /* Program the mode register set: queue size first, then the
         * MRS enable, as two separate upper-flush writes. */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two passes: pass 0 writes the base patterns, pass 1 writes
         * their bit-inverted forms (upper 48 bits flipped). */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d2);

                /* Target address for the test transaction. */
                val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);

                /* Start the write phase and poll (5 x 200ms) for DONE. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
                        MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out waiting for the write phase; abort.
                 * NOTE(review): test_fail is not set on timeout, so a
                 * hung write phase is reported as a pass -- verify. */
                if (cnt == 5)
                        break;

                /* Start the read-back/compare phase and poll again. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                if (cnt == 5)
                        break;

                /* Hardware sets PASS when the read-back matched. */
                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (!(val64 & MC_RLDRAM_TEST_PASS))
                        test_fail = 1;

                iteration++;
        }

        *data = test_fail;

        /* Bring the adapter out of test mode */
        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

        return test_fail;
}
4903
4904 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
4906  *  @sp : private member of the device structure, which is a pointer to the
4907  *  s2io_nic structure.
4908  *  @ethtest : pointer to a ethtool command specific structure that will be
4909  *  returned to the user.
4910  *  @data : variable that returns the result of each of the test
4911  * conducted by the driver.
4912  * Description:
4913  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4914  *  the health of the card.
4915  * Return value:
4916  *  void
4917  */
4918
static void s2io_ethtool_test(struct net_device *dev,
                              struct ethtool_test *ethtest,
                              uint64_t * data)
{
        nic_t *sp = dev->priv;
        int orig_state = netif_running(sp->dev);

        if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline Tests. */
                /* The interface must be down for the offline tests; it
                 * is restored to its original state afterwards. */
                if (orig_state)
                        s2io_close(sp->dev);

                /* data[0]: register read/write test. */
                if (s2io_register_test(sp, &data[0]))
                        ethtest->flags |= ETH_TEST_FL_FAILED;

                s2io_reset(sp);

                /* data[3]: RLDRAM memory test. */
                if (s2io_rldram_test(sp, &data[3]))
                        ethtest->flags |= ETH_TEST_FL_FAILED;

                s2io_reset(sp);

                /* data[1]: EEPROM read/write test. */
                if (s2io_eeprom_test(sp, &data[1]))
                        ethtest->flags |= ETH_TEST_FL_FAILED;

                /* data[4]: PCI built-in self test. */
                if (s2io_bist_test(sp, &data[4]))
                        ethtest->flags |= ETH_TEST_FL_FAILED;

                if (orig_state)
                        s2io_open(sp->dev);

                /* data[2] (link test) is not run offline. */
                data[2] = 0;
        } else {
                /* Online Tests. */
                if (!orig_state) {
                        DBG_PRINT(ERR_DBG,
                                  "%s: is not up, cannot run test\n",
                                  dev->name);
                        data[0] = -1;
                        data[1] = -1;
                        data[2] = -1;
                        data[3] = -1;
                        data[4] = -1;
                }

                /* data[2]: link state test (the only online test).
                 * NOTE(review): when the device is down, the -1 markers
                 * set above for data[0], [1], [3] and [4] are overwritten
                 * by the zeroes below, and the link test still runs --
                 * verify this is the intended reporting. */
                if (s2io_link_test(sp, &data[2]))
                        ethtest->flags |= ETH_TEST_FL_FAILED;

                data[0] = 0;
                data[1] = 0;
                data[3] = 0;
                data[4] = 0;
        }
}
4973
/*
 * s2io_get_ethtool_stats - fill the ethtool -S statistics array.
 *
 * Copies the hardware MAC statistics block into tmp_stats[] in the
 * order matching ethtool_stats_keys.  Many counters are kept by the
 * hardware as a little-endian 32-bit value plus a 32-bit overflow
 * word; those are stitched into one 64-bit value as (oflow << 32) | lo.
 * The index i must advance in lock-step with the strings table.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0;
        nic_t *sp = dev->priv;
        StatInfo_t *stat_info = sp->mac_control.stats_info;

        /* Flush the latest counters from hardware before copying. */
        s2io_updt_stats(sp);

        /* Transmit (TMAC) statistics. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        /* Receive (RMAC) statistics. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* Placeholder slot -- presumably reserved to keep the index in
         * sync with ethtool_stats_keys; confirm against that table. */
        tmp_stats[i++] = 0;
        /* Software-maintained ECC error counters. */
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
}
5073
5074 int s2io_ethtool_get_regs_len(struct net_device *dev)
5075 {
5076         return (XENA_REG_SPACE);
5077 }
5078
5079
5080 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5081 {
5082         nic_t *sp = dev->priv;
5083
5084         return (sp->rx_csum);
5085 }
5086 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5087 {
5088         nic_t *sp = dev->priv;
5089
5090         if (data)
5091                 sp->rx_csum = 1;
5092         else
5093                 sp->rx_csum = 0;
5094
5095         return 0;
5096 }
5097 int s2io_get_eeprom_len(struct net_device *dev)
5098 {
5099         return (XENA_EEPROM_SPACE);
5100 }
5101
5102 int s2io_ethtool_self_test_count(struct net_device *dev)
5103 {
5104         return (S2IO_TEST_LEN);
5105 }
5106 void s2io_ethtool_get_strings(struct net_device *dev,
5107                               u32 stringset, u8 * data)
5108 {
5109         switch (stringset) {
5110         case ETH_SS_TEST:
5111                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5112                 break;
5113         case ETH_SS_STATS:
5114                 memcpy(data, &ethtool_stats_keys,
5115                        sizeof(ethtool_stats_keys));
5116         }
5117 }
5118 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5119 {
5120         return (S2IO_STAT_LEN);
5121 }
5122
5123 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5124 {
5125         if (data)
5126                 dev->features |= NETIF_F_IP_CSUM;
5127         else
5128                 dev->features &= ~NETIF_F_IP_CSUM;
5129
5130         return 0;
5131 }
5132
5133
/* ethtool entry points for the s2io driver; installed on the
 * net_device at probe time.  Generic ethtool_op_* helpers are used
 * where no hardware-specific handling is needed. */
static struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
/* TSO hooks only on kernels that define the feature flag. */
#ifdef NETIF_F_TSO
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
#endif
        .self_test_count = s2io_ethtool_self_test_count,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_stats_count = s2io_ethtool_get_stats_count,
        .get_ethtool_stats = s2io_get_ethtool_stats
};
5163
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :   An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
 */

int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5180
/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *   @dev : device pointer.
 *   @new_mtu : the new MTU size for the device.
 *   Description: A driver entry point to change MTU size for the device.
 *   If the interface is running, the card is brought down and back up so
 *   the new MTU is programmed during re-initialization; otherwise the
 *   maximum payload length register is written directly.
 *  Return value:
 *   0 on success and an appropriate (-)ve integer as defined in errno.h
 *   file on failure.
 */

int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	nic_t *sp = dev->priv;

	/* Reject sizes outside the supported [MIN_MTU, S2IO_JUMBO_SIZE]
	 * range.  NOTE(review): -EINVAL would be the conventional errno
	 * here; -EPERM is kept as existing callers may rely on it. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Full restart so init_nic() programs the new MTU. */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		XENA_dev_config_t __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the RMAC maximum payload length directly. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
5221
/**
 *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the net_device structure, cast to unsigned long.
 *  Description:
 *  This is the tasklet or the bottom half of the ISR. This is
 *  an extension of the ISR which is scheduled by the scheduler to be run
 *  when the load on the CPU is low. All low priority tasks of the ISR can
 *  be pushed into the tasklet. For now the tasklet is used only to
 *  replenish the Rx buffers in the Rx buffer descriptors.
 *  Return value:
 *  void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	nic_t *sp = dev->priv;
	int i, ret;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* TASKLET_IN_USE presumably test-and-sets bit 0 of
	 * sp->tasklet_status so only one instance replenishes at a
	 * time -- TODO confirm the macro definition. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			/* Top up ring i; stop on allocation failure or
			 * when the ring is already full. */
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				DBG_PRINT(ERR_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		/* Release the flag taken by TASKLET_IN_USE above. */
		clear_bit(0, (&sp->tasklet_status));
	}
}
5264
/**
 * s2io_set_link - Set the Link status
 * @data: long pointer to device private structure
 * Description: Sets the link status for the adapter
 */

static void s2io_set_link(unsigned long data)
{
	nic_t *nic = (nic_t *) data;
	struct net_device *dev = nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Bit 0 of link_state doubles as a "link change in progress"
	 * lock, also taken by s2io_card_down() during reset. */
	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		return;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		if (LINK_IS_UP(val64)) {
			/* Enable the adapter and drive the link LED. */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
							     subid)) {
				/* Boards with faulty LED wiring drive the
				 * indicator through GPIO 0 instead. */
				val64 = readq(&bar0->gpio_control);
				val64 |= GPIO_CTRL_GPIO_0;
				writeq(val64, &bar0->gpio_control);
				val64 = readq(&bar0->gpio_control);
			} else {
				val64 |= ADAPTER_LED_ON;
				writeq(val64, &bar0->adapter_control);
			}
			if (s2io_link_fault_indication(nic) ==
						MAC_RMAC_ERR_TIMER) {
				/* Re-check: the link may have dropped while
				 * the adapter was being enabled. */
				val64 = readq(&bar0->adapter_status);
				if (!LINK_IS_UP(val64)) {
					DBG_PRINT(ERR_DBG, "%s:", dev->name);
					DBG_PRINT(ERR_DBG, " Link down");
					DBG_PRINT(ERR_DBG, "after ");
					DBG_PRINT(ERR_DBG, "enabling ");
					DBG_PRINT(ERR_DBG, "device \n");
				}
			}
			if (nic->device_enabled_once == FALSE) {
				nic->device_enabled_once = TRUE;
			}
			s2io_link(nic, LINK_UP);
		} else {
			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
							      subid)) {
				/* Turn the GPIO-driven link LED off. */
				val64 = readq(&bar0->gpio_control);
				val64 &= ~GPIO_CTRL_GPIO_0;
				writeq(val64, &bar0->gpio_control);
				val64 = readq(&bar0->gpio_control);
			}
			s2io_link(nic, LINK_DOWN);
		}
	} else {                /* NIC is not Quiescent. */
		DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
		DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
		netif_stop_queue(dev);
	}
	/* Drop the "link change in progress" lock. */
	clear_bit(0, &(nic->link_state));
}
5341
/*
 * s2io_card_down - brings the adapter down in an orderly fashion.
 * @sp: device private structure.
 * Stops traffic, waits for the hardware to quiesce, resets it and
 * frees all Tx/Rx buffers.  Serialized against s2io_set_link() via
 * bit 0 of sp->link_state.
 */
static void s2io_card_down(nic_t * sp)
{
	int cnt = 0;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
			break;
		}

		/* Poll every 50ms, give up (and reset anyway) after 10 tries. */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Release the lock taken at the top of this function. */
	clear_bit(0, &(sp->link_state));
}
5402
/*
 * s2io_card_up - brings the adapter up.
 * @sp: device private structure.
 * Initializes the hardware, selects the interrupt mechanism, fills the
 * Rx rings, sets the receive mode and starts the NIC.
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
static int s2io_card_up(nic_t * sp)
{
	int i, ret = 0;
	mac_info_t *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		return -ENODEV;
	}

	/* Enable the requested interrupt mechanism; on failure fall
	 * back to legacy INTA. */
	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		tasklet_kill(&sp->task);
		s2io_reset(sp);
		/* NOTE(review): the irq is not requested in this function;
		 * verify the caller owns it before this free_irq() runs. */
		free_irq(dev->irq, dev);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm handler to fire every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
5466
/**
 * s2io_restart_nic - Resets the NIC.
 * @data : long pointer to the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	nic_t *sp = dev->priv;

	/* Full down/up cycle; the queue is woken even if bring-up
	 * failed so the stack is not left stalled. */
	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);

}
5492
5493 /**
5494  *  s2io_tx_watchdog - Watchdog for transmit side.
5495  *  @dev : Pointer to net device structure
5496  *  Description:
5497  *  This function is triggered if the Tx Queue is stopped
5498  *  for a pre-defined amount of time when the Interface is still up.
5499  *  If the Interface is jammed in such a situation, the hardware is
5500  *  reset (by s2io_close) and restarted again (by s2io_open) to
5501  *  overcome any problem that might have been caused in the hardware.
5502  *  Return value:
5503  *  void
5504  */
5505
5506 static void s2io_tx_watchdog(struct net_device *dev)
5507 {
5508         nic_t *sp = dev->priv;
5509
5510         if (netif_carrier_ok(dev)) {
5511                 schedule_work(&sp->rst_timer_task);
5512         }
5513 }
5514
5515 /**
5516  *   rx_osm_handler - To perform some OS related operations on SKB.
5517  *   @sp: private member of the device structure,pointer to s2io_nic structure.
5518  *   @skb : the socket buffer pointer.
5519  *   @len : length of the packet
5520  *   @cksum : FCS checksum of the frame.
5521  *   @ring_no : the ring from which this RxD was extracted.
5522  *   Description:
5523  *   This function is called by the Tx interrupt serivce routine to perform
5524  *   some OS related operations on the SKB before passing it to the upper
5525  *   layers. It mainly checks if the checksum is OK, if so adds it to the
5526  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
5527  *   to the upper layer. If the checksum is wrong, it increments the Rx
5528  *   packet error count, frees the SKB and returns error.
5529  *   Return value:
5530  *   SUCCESS on success and -1 on failure.
5531  */
5532 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5533 {
5534         nic_t *sp = ring_data->nic;
5535         struct net_device *dev = (struct net_device *) sp->dev;
5536         struct sk_buff *skb = (struct sk_buff *)
5537                 ((unsigned long) rxdp->Host_Control);
5538         int ring_no = ring_data->ring_no;
5539         u16 l3_csum, l4_csum;
5540 #ifdef CONFIG_2BUFF_MODE
5541         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5542         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5543         int get_block = ring_data->rx_curr_get_info.block_index;
5544         int get_off = ring_data->rx_curr_get_info.offset;
5545         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5546         unsigned char *buff;
5547 #else
5548         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
5549 #endif
5550         skb->dev = dev;
5551         if (rxdp->Control_1 & RXD_T_CODE) {
5552                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5553                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5554                           dev->name, err);
5555                 dev_kfree_skb(skb);
5556                 sp->stats.rx_crc_errors++;
5557                 atomic_dec(&sp->rx_bufs_left[ring_no]);
5558                 rxdp->Host_Control = 0;
5559                 return 0;
5560         }
5561
5562         /* Updating statistics */
5563         rxdp->Host_Control = 0;
5564         sp->rx_pkt_count++;
5565         sp->stats.rx_packets++;
5566 #ifndef CONFIG_2BUFF_MODE
5567         sp->stats.rx_bytes += len;
5568 #else
5569         sp->stats.rx_bytes += buf0_len + buf2_len;
5570 #endif
5571
5572 #ifndef CONFIG_2BUFF_MODE
5573         skb_put(skb, len);
5574 #else
5575         buff = skb_push(skb, buf0_len);
5576         memcpy(buff, ba->ba_0, buf0_len);
5577         skb_put(skb, buf2_len);
5578 #endif
5579
5580         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5581             (sp->rx_csum)) {
5582                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5583                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5584                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5585                         /*
5586                          * NIC verifies if the Checksum of the received
5587                          * frame is Ok or not and accordingly returns
5588                          * a flag in the RxD.
5589                          */
5590                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5591                 } else {
5592                         /*
5593                          * Packet with erroneous checksum, let the
5594                          * upper layers deal with it.
5595                          */
5596                         skb->ip_summed = CHECKSUM_NONE;
5597                 }
5598         } else {
5599                 skb->ip_summed = CHECKSUM_NONE;
5600         }
5601
5602         skb->protocol = eth_type_trans(skb, dev);
5603 #ifdef CONFIG_S2IO_NAPI
5604         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5605                 /* Queueing the vlan frame to the upper layer */
5606                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5607                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5608         } else {
5609                 netif_receive_skb(skb);
5610         }
5611 #else
5612         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5613                 /* Queueing the vlan frame to the upper layer */
5614                 vlan_hwaccel_rx(skb, sp->vlgrp,
5615                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5616         } else {
5617                 netif_rx(skb);
5618         }
5619 #endif
5620         dev->last_rx = jiffies;
5621         atomic_dec(&sp->rx_bufs_left[ring_no]);
5622         return SUCCESS;
5623 }
5624
5625 /**
5626  *  s2io_link - stops/starts the Tx queue.
5627  *  @sp : private member of the device structure, which is a pointer to the
5628  *  s2io_nic structure.
5629  *  @link : inidicates whether link is UP/DOWN.
5630  *  Description:
5631  *  This function stops/starts the Tx queue depending on whether the link
5632  *  status of the NIC is is down or up. This is called by the Alarm
5633  *  interrupt handler whenever a link change interrupt comes up.
5634  *  Return value:
5635  *  void.
5636  */
5637
5638 void s2io_link(nic_t * sp, int link)
5639 {
5640         struct net_device *dev = (struct net_device *) sp->dev;
5641
5642         if (link != sp->last_link_state) {
5643                 if (link == LINK_DOWN) {
5644                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5645                         netif_carrier_off(dev);
5646                 } else {
5647                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5648                         netif_carrier_on(dev);
5649                 }
5650         }
5651         sp->last_link_state = link;
5652 }
5653
5654 /**
5655  *  get_xena_rev_id - to identify revision ID of xena.
5656  *  @pdev : PCI Dev structure
5657  *  Description:
5658  *  Function to identify the Revision ID of xena.
5659  *  Return value:
5660  *  returns the revision ID of the device.
5661  */
5662
5663 int get_xena_rev_id(struct pci_dev *pdev)
5664 {
5665         u8 id = 0;
5666         int ret;
5667         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5668         return id;
5669 }
5670
/**
 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description:
 *  This function initializes a few of the PCI and PCI-X configuration registers
 *  with recommended values.  Each write is followed by a read-back so the
 *  local copy reflects what the device actually latched.
 *  Return value:
 *  void
 */

static void s2io_init_pci(nic_t * sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

	/* Forcibly disabling relaxed ordering capability of the card. */
	pcix_cmd &= 0xfffd;	/* clear bit 1 (relaxed ordering enable) */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      pcix_cmd);
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
}
5707
5708 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5709 MODULE_LICENSE("GPL");
5710 MODULE_VERSION(DRV_VERSION);
5711
5712 module_param(tx_fifo_num, int, 0);
5713 module_param(rx_ring_num, int, 0);
5714 module_param_array(tx_fifo_len, uint, NULL, 0);
5715 module_param_array(rx_ring_sz, uint, NULL, 0);
5716 module_param_array(rts_frm_len, uint, NULL, 0);
5717 module_param(use_continuous_tx_intrs, int, 1);
5718 module_param(rmac_pause_time, int, 0);
5719 module_param(mc_pause_threshold_q0q3, int, 0);
5720 module_param(mc_pause_threshold_q4q7, int, 0);
5721 module_param(shared_splits, int, 0);
5722 module_param(tmac_util_period, int, 0);
5723 module_param(rmac_util_period, int, 0);
5724 module_param(bimodal, bool, 0);
5725 #ifndef CONFIG_S2IO_NAPI
5726 module_param(indicate_max_pkts, int, 0);
5727 #endif
5728 module_param(rxsync_frequency, int, 0);
5729 module_param(intr_type, int, 0);
5730
5731 /**
5732  *  s2io_init_nic - Initialization of the adapter .
5733  *  @pdev : structure containing the PCI related information of the device.
5734  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5735  *  Description:
5736  *  The function initializes an adapter identified by the pci_dec structure.
5737  *  All OS related initialization including memory and device structure and
5738  *  initlaization of the device private variable is done. Also the swapper
5739  *  control register is initialized to enable read and write into the I/O
5740  *  registers of the device.
5741  *  Return value:
5742  *  returns 0 on success and negative on failure.
5743  */
5744
5745 static int __devinit
5746 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5747 {
5748         nic_t *sp;
5749         struct net_device *dev;
5750         int i, j, ret;
5751         int dma_flag = FALSE;
5752         u32 mac_up, mac_down;
5753         u64 val64 = 0, tmp64 = 0;
5754         XENA_dev_config_t __iomem *bar0 = NULL;
5755         u16 subid;
5756         mac_info_t *mac_control;
5757         struct config_param *config;
5758         int mode;
5759         u8 dev_intr_type = intr_type;
5760
5761 #ifdef CONFIG_S2IO_NAPI
5762         if (dev_intr_type != INTA) {
5763                 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
5764 is enabled. Defaulting to INTA\n");
5765                 dev_intr_type = INTA;
5766         }
5767         else
5768                 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5769 #endif
5770
5771         if ((ret = pci_enable_device(pdev))) {
5772                 DBG_PRINT(ERR_DBG,
5773                           "s2io_init_nic: pci_enable_device failed\n");
5774                 return ret;
5775         }
5776
5777         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5778                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5779                 dma_flag = TRUE;
5780                 if (pci_set_consistent_dma_mask
5781                     (pdev, DMA_64BIT_MASK)) {
5782                         DBG_PRINT(ERR_DBG,
5783                                   "Unable to obtain 64bit DMA for \
5784                                         consistent allocations\n");
5785                         pci_disable_device(pdev);
5786                         return -ENOMEM;
5787                 }
5788         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5789                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5790         } else {
5791                 pci_disable_device(pdev);
5792                 return -ENOMEM;
5793         }
5794
5795         if ((dev_intr_type == MSI_X) && 
5796                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
5797                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
5798                 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
5799 Defaulting to INTA\n");
5800                 dev_intr_type = INTA;
5801         }
5802         if (dev_intr_type != MSI_X) {
5803                 if (pci_request_regions(pdev, s2io_driver_name)) {
5804                         DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5805                             pci_disable_device(pdev);
5806                         return -ENODEV;
5807                 }
5808         }
5809         else {
5810                 if (!(request_mem_region(pci_resource_start(pdev, 0),
5811                          pci_resource_len(pdev, 0), s2io_driver_name))) {
5812                         DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
5813                         pci_disable_device(pdev);
5814                         return -ENODEV;
5815                 }
5816                 if (!(request_mem_region(pci_resource_start(pdev, 2),
5817                          pci_resource_len(pdev, 2), s2io_driver_name))) {
5818                         DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
5819                         release_mem_region(pci_resource_start(pdev, 0),
5820                                    pci_resource_len(pdev, 0));
5821                         pci_disable_device(pdev);
5822                         return -ENODEV;
5823                 }
5824         }
5825
5826         dev = alloc_etherdev(sizeof(nic_t));
5827         if (dev == NULL) {
5828                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5829                 pci_disable_device(pdev);
5830                 pci_release_regions(pdev);
5831                 return -ENODEV;
5832         }
5833
5834         pci_set_master(pdev);
5835         pci_set_drvdata(pdev, dev);
5836         SET_MODULE_OWNER(dev);
5837         SET_NETDEV_DEV(dev, &pdev->dev);
5838
5839         /*  Private member variable initialized to s2io NIC structure */
5840         sp = dev->priv;
5841         memset(sp, 0, sizeof(nic_t));
5842         sp->dev = dev;
5843         sp->pdev = pdev;
5844         sp->high_dma_flag = dma_flag;
5845         sp->device_enabled_once = FALSE;
5846         sp->intr_type = dev_intr_type;
5847
5848         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5849                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5850                 sp->device_type = XFRAME_II_DEVICE;
5851         else
5852                 sp->device_type = XFRAME_I_DEVICE;
5853
5854                 
5855         /* Initialize some PCI/PCI-X fields of the NIC. */
5856         s2io_init_pci(sp);
5857
5858         /*
5859          * Setting the device configuration parameters.
5860          * Most of these parameters can be specified by the user during
5861          * module insertion as they are module loadable parameters. If
5862          * these parameters are not not specified during load time, they
5863          * are initialized with default values.
5864          */
5865         mac_control = &sp->mac_control;
5866         config = &sp->config;
5867
5868         /* Tx side parameters. */
5869         if (tx_fifo_len[0] == 0)
5870                 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5871         config->tx_fifo_num = tx_fifo_num;
5872         for (i = 0; i < MAX_TX_FIFOS; i++) {
5873                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5874                 config->tx_cfg[i].fifo_priority = i;
5875         }
5876
5877         /* mapping the QoS priority to the configured fifos */
5878         for (i = 0; i < MAX_TX_FIFOS; i++)
5879                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5880
5881         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5882         for (i = 0; i < config->tx_fifo_num; i++) {
5883                 config->tx_cfg[i].f_no_snoop =
5884                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5885                 if (config->tx_cfg[i].fifo_len < 65) {
5886                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5887                         break;
5888                 }
5889         }
5890         config->max_txds = MAX_SKB_FRAGS + 1;
5891
5892         /* Rx side parameters. */
5893         if (rx_ring_sz[0] == 0)
5894                 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5895         config->rx_ring_num = rx_ring_num;
5896         for (i = 0; i < MAX_RX_RINGS; i++) {
5897                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5898                     (MAX_RXDS_PER_BLOCK + 1);
5899                 config->rx_cfg[i].ring_priority = i;
5900         }
5901
5902         for (i = 0; i < rx_ring_num; i++) {
5903                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5904                 config->rx_cfg[i].f_no_snoop =
5905                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5906         }
5907
5908         /*  Setting Mac Control parameters */
5909         mac_control->rmac_pause_time = rmac_pause_time;
5910         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5911         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5912
5913
5914         /* Initialize Ring buffer parameters. */
5915         for (i = 0; i < config->rx_ring_num; i++)
5916                 atomic_set(&sp->rx_bufs_left[i], 0);
5917
5918         /* Initialize the number of ISRs currently running */
5919         atomic_set(&sp->isr_cnt, 0);
5920
5921         /*  initialize the shared memory used by the NIC and the host */
5922         if (init_shared_mem(sp)) {
5923                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5924                           __FUNCTION__);
5925                 ret = -ENOMEM;
5926                 goto mem_alloc_failed;
5927         }
5928
5929         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5930                                      pci_resource_len(pdev, 0));
5931         if (!sp->bar0) {
5932                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5933                           dev->name);
5934                 ret = -ENOMEM;
5935                 goto bar0_remap_failed;
5936         }
5937
5938         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5939                                      pci_resource_len(pdev, 2));
5940         if (!sp->bar1) {
5941                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5942                           dev->name);
5943                 ret = -ENOMEM;
5944                 goto bar1_remap_failed;
5945         }
5946
5947         dev->irq = pdev->irq;
5948         dev->base_addr = (unsigned long) sp->bar0;
5949
5950         /* Initializing the BAR1 address as the start of the FIFO pointer. */
5951         for (j = 0; j < MAX_TX_FIFOS; j++) {
5952                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5953                     (sp->bar1 + (j * 0x00020000));
5954         }
5955
5956         /*  Driver entry points */
5957         dev->open = &s2io_open;
5958         dev->stop = &s2io_close;
5959         dev->hard_start_xmit = &s2io_xmit;
5960         dev->get_stats = &s2io_get_stats;
5961         dev->set_multicast_list = &s2io_set_multicast;
5962         dev->do_ioctl = &s2io_ioctl;
5963         dev->change_mtu = &s2io_change_mtu;
5964         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5965         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5966         dev->vlan_rx_register = s2io_vlan_rx_register;
5967         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5968
5969         /*
5970          * will use eth_mac_addr() for  dev->set_mac_address
5971          * mac address will be set every time dev->open() is called
5972          */
5973 #if defined(CONFIG_S2IO_NAPI)
5974         dev->poll = s2io_poll;
5975         dev->weight = 32;
5976 #endif
5977
5978         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5979         if (sp->high_dma_flag == TRUE)
5980                 dev->features |= NETIF_F_HIGHDMA;
5981 #ifdef NETIF_F_TSO
5982         dev->features |= NETIF_F_TSO;
5983 #endif
5984
5985         dev->tx_timeout = &s2io_tx_watchdog;
5986         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5987         INIT_WORK(&sp->rst_timer_task,
5988                   (void (*)(void *)) s2io_restart_nic, dev);
5989         INIT_WORK(&sp->set_link_task,
5990                   (void (*)(void *)) s2io_set_link, sp);
5991
5992         pci_save_state(sp->pdev);
5993
5994         /* Setting swapper control on the NIC, for proper reset operation */
5995         if (s2io_set_swapper(sp)) {
5996                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5997                           dev->name);
5998                 ret = -EAGAIN;
5999                 goto set_swap_failed;
6000         }
6001
6002         /* Verify if the Herc works on the slot its placed into */
6003         if (sp->device_type & XFRAME_II_DEVICE) {
6004                 mode = s2io_verify_pci_mode(sp);
6005                 if (mode < 0) {
6006                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
6007                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
6008                         ret = -EBADSLT;
6009                         goto set_swap_failed;
6010                 }
6011         }
6012
6013         /* Not needed for Herc */
6014         if (sp->device_type & XFRAME_I_DEVICE) {
6015                 /*
6016                  * Fix for all "FFs" MAC address problems observed on
6017                  * Alpha platforms
6018                  */
6019                 fix_mac_address(sp);
6020                 s2io_reset(sp);
6021         }
6022
6023         /*
6024          * MAC address initialization.
6025          * For now only one mac address will be read and used.
6026          */
6027         bar0 = sp->bar0;
6028         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
6029             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
6030         writeq(val64, &bar0->rmac_addr_cmd_mem);
6031         wait_for_cmd_complete(sp);
6032
6033         tmp64 = readq(&bar0->rmac_addr_data0_mem);
6034         mac_down = (u32) tmp64;
6035         mac_up = (u32) (tmp64 >> 32);
6036
6037         memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
6038
6039         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
6040         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
6041         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
6042         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
6043         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
6044         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
6045
6046         /*  Set the factory defined MAC address initially   */
6047         dev->addr_len = ETH_ALEN;
6048         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
6049
6050         /*
6051          * Initialize the tasklet status and link state flags
6052          * and the card state parameter
6053          */
6054         atomic_set(&(sp->card_state), 0);
6055         sp->tasklet_status = 0;
6056         sp->link_state = 0;
6057
6058         /* Initialize spinlocks */
6059         spin_lock_init(&sp->tx_lock);
6060 #ifndef CONFIG_S2IO_NAPI
6061         spin_lock_init(&sp->put_lock);
6062 #endif
6063         spin_lock_init(&sp->rx_lock);
6064
6065         /*
6066          * SXE-002: Configure link and activity LED to init state
6067          * on driver load.
6068          */
6069         subid = sp->pdev->subsystem_device;
6070         if ((subid & 0xFF) >= 0x07) {
6071                 val64 = readq(&bar0->gpio_control);
6072                 val64 |= 0x0000800000000000ULL;
6073                 writeq(val64, &bar0->gpio_control);
6074                 val64 = 0x0411040400000000ULL;
6075                 writeq(val64, (void __iomem *) bar0 + 0x2700);
6076                 val64 = readq(&bar0->gpio_control);
6077         }
6078
6079         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
6080
6081         if (register_netdev(dev)) {
6082                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
6083                 ret = -ENODEV;
6084                 goto register_failed;
6085         }
6086
6087         if (sp->device_type & XFRAME_II_DEVICE) {
6088                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6089                           dev->name);
6090                 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6091                                 get_xena_rev_id(sp->pdev),
6092                                 s2io_driver_version);
6093 #ifdef CONFIG_2BUFF_MODE
6094                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6095 #endif
6096                 switch(sp->intr_type) {
6097                         case INTA:
6098                                 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6099                                 break;
6100                         case MSI:
6101                                 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6102                                 break;
6103                         case MSI_X:
6104                                 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6105                                 break;
6106                 }
6107
6108                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6109                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6110                           sp->def_mac_addr[0].mac_addr[0],
6111                           sp->def_mac_addr[0].mac_addr[1],
6112                           sp->def_mac_addr[0].mac_addr[2],
6113                           sp->def_mac_addr[0].mac_addr[3],
6114                           sp->def_mac_addr[0].mac_addr[4],
6115                           sp->def_mac_addr[0].mac_addr[5]);
6116                 mode = s2io_print_pci_mode(sp);
6117                 if (mode < 0) {
6118                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6119                         ret = -EBADSLT;
6120                         goto set_swap_failed;
6121                 }
6122         } else {
6123                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6124                           dev->name);
6125                 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6126                                         get_xena_rev_id(sp->pdev),
6127                                         s2io_driver_version);
6128 #ifdef CONFIG_2BUFF_MODE
6129                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6130 #endif
6131                 switch(sp->intr_type) {
6132                         case INTA:
6133                                 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6134                                 break;
6135                         case MSI:
6136                                 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6137                                 break;
6138                         case MSI_X:
6139                                 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6140                                 break;
6141                 }
6142                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6143                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6144                           sp->def_mac_addr[0].mac_addr[0],
6145                           sp->def_mac_addr[0].mac_addr[1],
6146                           sp->def_mac_addr[0].mac_addr[2],
6147                           sp->def_mac_addr[0].mac_addr[3],
6148                           sp->def_mac_addr[0].mac_addr[4],
6149                           sp->def_mac_addr[0].mac_addr[5]);
6150         }
6151
6152         /* Initialize device name */
6153         strcpy(sp->name, dev->name);
6154         if (sp->device_type & XFRAME_II_DEVICE)
6155                 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6156         else
6157                 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6158
6159         /* Initialize bimodal Interrupts */
6160         sp->config.bimodal = bimodal;
6161         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6162                 sp->config.bimodal = 0;
6163                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6164                         dev->name);
6165         }
6166
6167         /*
6168          * Make Link state as off at this point, when the Link change
6169          * interrupt comes the state will be automatically changed to
6170          * the right state.
6171          */
6172         netif_carrier_off(dev);
6173
6174         return 0;
6175
6176       register_failed:
6177       set_swap_failed:
6178         iounmap(sp->bar1);
6179       bar1_remap_failed:
6180         iounmap(sp->bar0);
6181       bar0_remap_failed:
6182       mem_alloc_failed:
6183         free_shared_mem(sp);
6184         pci_disable_device(pdev);
6185         if (dev_intr_type != MSI_X)
6186                 pci_release_regions(pdev);
6187         else {
6188                 release_mem_region(pci_resource_start(pdev, 0),
6189                         pci_resource_len(pdev, 0));
6190                 release_mem_region(pci_resource_start(pdev, 2),
6191                         pci_resource_len(pdev, 2));
6192         }
6193         pci_set_drvdata(pdev, NULL);
6194         free_netdev(dev);
6195
6196         return ret;
6197 }
6198
6199 /**
6200  * s2io_rem_nic - Free the PCI device
6201  * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
6203  * PCI device and free up all resource held up by the device. This could
6204  * be in response to a Hot plug event or when the driver is to be removed
6205  * from memory.
6206  */
6207
6208 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
6209 {
6210         struct net_device *dev =
6211             (struct net_device *) pci_get_drvdata(pdev);
6212         nic_t *sp;
6213
6214         if (dev == NULL) {
6215                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
6216                 return;
6217         }
6218
6219         sp = dev->priv;
6220         unregister_netdev(dev);
6221
6222         free_shared_mem(sp);
6223         iounmap(sp->bar0);
6224         iounmap(sp->bar1);
6225         pci_disable_device(pdev);
6226         if (sp->intr_type != MSI_X)
6227                 pci_release_regions(pdev);
6228         else {
6229                 release_mem_region(pci_resource_start(pdev, 0),
6230                         pci_resource_len(pdev, 0));
6231                 release_mem_region(pci_resource_start(pdev, 2),
6232                         pci_resource_len(pdev, 2));
6233         }
6234         pci_set_drvdata(pdev, NULL);
6235         free_netdev(dev);
6236 }
6237
6238 /**
6239  * s2io_starter - Entry point for the driver
6240  * Description: This function is the entry point for the driver. It verifies
6241  * the module loadable parameters and initializes PCI configuration space.
6242  */
6243
6244 int __init s2io_starter(void)
6245 {
6246         return pci_module_init(&s2io_driver);
6247 }
6248
6249 /**
6250  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
6252  */
6253
void s2io_closer(void)
{
	/*
	 * Unregister the PCI driver; the PCI core unbinds every device
	 * still attached to it (invoking the driver's remove callback).
	 */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
6259
/* Hook the driver's load/unload entry points into the module machinery. */
module_init(s2io_starter);
module_exit(s2io_closer);