1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated, and also for styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christoph Hellwig    : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
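/*
 * Example invocation (values are illustrative only; the module is assumed to
 * be loaded as "s2io"):
 *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 * This would configure two Tx FIFOs of 1024 descriptors each and two Rx rings.
 */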
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 2.0.2.1";
71
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73 {
74         int ret;
75
76         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79         return ret;
80 }
81
82 /*
83  * Cards with following subsystem_id have a link state indication
84  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85  * macro below identifies these cards given the subsystem_id.
86  */
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88         (dev_type == XFRAME_I_DEVICE) ?                 \
89                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
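/* For example, an Xframe I card with subsystem_id 0x600C is flagged as having
 * a faulty link indicator, while one with subsystem_id 0x6010 is not.
 */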
91
92 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95 #define PANIC   1
96 #define LOW     2
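/*
 * rx_buffer_level() reports how far Rx buffer replenishment has fallen behind
 * on a ring: LOW once more than 16 buffers are outstanding, and PANIC once the
 * buffers still posted drop to one block's worth (MAX_RXDS_PER_BLOCK) or less.
 */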
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 {
99         int level = 0;
100         mac_info_t *mac_control;
101
102         mac_control = &sp->mac_control;
103         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104                 level = LOW;
105                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106                         level = PANIC;
107                 }
108         }
109
110         return level;
111 }
112
113 /* Ethtool related variables and Macros. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115         "Register test\t(offline)",
116         "Eeprom test\t(offline)",
117         "Link test\t(online)",
118         "RLDRAM test\t(offline)",
119         "BIST Test\t(offline)"
120 };
121
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123         {"tmac_frms"},
124         {"tmac_data_octets"},
125         {"tmac_drop_frms"},
126         {"tmac_mcst_frms"},
127         {"tmac_bcst_frms"},
128         {"tmac_pause_ctrl_frms"},
129         {"tmac_any_err_frms"},
130         {"tmac_vld_ip_octets"},
131         {"tmac_vld_ip"},
132         {"tmac_drop_ip"},
133         {"tmac_icmp"},
134         {"tmac_rst_tcp"},
135         {"tmac_tcp"},
136         {"tmac_udp"},
137         {"rmac_vld_frms"},
138         {"rmac_data_octets"},
139         {"rmac_fcs_err_frms"},
140         {"rmac_drop_frms"},
141         {"rmac_vld_mcst_frms"},
142         {"rmac_vld_bcst_frms"},
143         {"rmac_in_rng_len_err_frms"},
144         {"rmac_long_frms"},
145         {"rmac_pause_ctrl_frms"},
146         {"rmac_discarded_frms"},
147         {"rmac_usized_frms"},
148         {"rmac_osized_frms"},
149         {"rmac_frag_frms"},
150         {"rmac_jabber_frms"},
151         {"rmac_ip"},
152         {"rmac_ip_octets"},
153         {"rmac_hdr_err_ip"},
154         {"rmac_drop_ip"},
155         {"rmac_icmp"},
156         {"rmac_tcp"},
157         {"rmac_udp"},
158         {"rmac_err_drp_udp"},
159         {"rmac_pause_cnt"},
160         {"rmac_accepted_ip"},
161         {"rmac_err_tcp"},
162         {"\n DRIVER STATISTICS"},
163         {"single_bit_ecc_errs"},
164         {"double_bit_ecc_errs"},
165 };
166
167 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
172
173 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
174                         init_timer(&timer);                     \
175                         timer.function = handle;                \
176                         timer.data = (unsigned long) arg;       \
177                         mod_timer(&timer, (jiffies + exp))      \
178
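/*
 * Typical use of the macro above (handler and argument names are purely
 * illustrative):
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */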
179 /* Add the vlan */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181                                         struct vlan_group *grp)
182 {
183         nic_t *nic = dev->priv;
184         unsigned long flags;
185
186         spin_lock_irqsave(&nic->tx_lock, flags);
187         nic->vlgrp = grp;
188         spin_unlock_irqrestore(&nic->tx_lock, flags);
189 }
190
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193 {
194         nic_t *nic = dev->priv;
195         unsigned long flags;
196
197         spin_lock_irqsave(&nic->tx_lock, flags);
198         if (nic->vlgrp)
199                 nic->vlgrp->vlan_devices[vid] = NULL;
200         spin_unlock_irqrestore(&nic->tx_lock, flags);
201 }
202
203 /*
204  * Constants to be programmed into the Xena's registers, to configure
205  * the XAUI.
206  */
207
208 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
209 #define END_SIGN        0x0
210
211 static u64 herc_act_dtx_cfg[] = {
212         /* Set address */
213         0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214         /* Write data */
215         0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216         /* Set address */
217         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218         /* Write data */
219         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220         /* Set address */
221         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222         /* Write data */
223         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224         /* Done */
225         END_SIGN
226 };
227
228 static u64 xena_mdio_cfg[] = {
229         /* Reset PMA PLL */
230         0xC001010000000000ULL, 0xC0010100000000E0ULL,
231         0xC0010100008000E4ULL,
232         /* Remove Reset from PMA PLL */
233         0xC001010000000000ULL, 0xC0010100000000E0ULL,
234         0xC0010100000000E4ULL,
235         END_SIGN
236 };
237
238 static u64 xena_dtx_cfg[] = {
239         0x8000051500000000ULL, 0x80000515000000E0ULL,
240         0x80000515D93500E4ULL, 0x8001051500000000ULL,
241         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242         0x8002051500000000ULL, 0x80020515000000E0ULL,
243         0x80020515F21000E4ULL,
244         /* Set PADLOOPBACKN */
245         0x8002051500000000ULL, 0x80020515000000E0ULL,
246         0x80020515B20000E4ULL, 0x8003051500000000ULL,
247         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248         0x8004051500000000ULL, 0x80040515000000E0ULL,
249         0x80040515B20000E4ULL, 0x8005051500000000ULL,
250         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
251         SWITCH_SIGN,
252         /* Remove PADLOOPBACKN */
253         0x8002051500000000ULL, 0x80020515000000E0ULL,
254         0x80020515F20000E4ULL, 0x8003051500000000ULL,
255         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256         0x8004051500000000ULL, 0x80040515000000E0ULL,
257         0x80040515F20000E4ULL, 0x8005051500000000ULL,
258         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
259         END_SIGN
260 };
261
262 /*
263  * Constants for Fixing the MacAddress problem seen mostly on
264  * Alpha machines.
265  */
266 static u64 fix_mac[] = {
267         0x0060000000000000ULL, 0x0060600000000000ULL,
268         0x0040600000000000ULL, 0x0000600000000000ULL,
269         0x0020600000000000ULL, 0x0060600000000000ULL,
270         0x0020600000000000ULL, 0x0060600000000000ULL,
271         0x0020600000000000ULL, 0x0060600000000000ULL,
272         0x0020600000000000ULL, 0x0060600000000000ULL,
273         0x0020600000000000ULL, 0x0060600000000000ULL,
274         0x0020600000000000ULL, 0x0060600000000000ULL,
275         0x0020600000000000ULL, 0x0060600000000000ULL,
276         0x0020600000000000ULL, 0x0060600000000000ULL,
277         0x0020600000000000ULL, 0x0060600000000000ULL,
278         0x0020600000000000ULL, 0x0060600000000000ULL,
279         0x0020600000000000ULL, 0x0000600000000000ULL,
280         0x0040600000000000ULL, 0x0060600000000000ULL,
281         END_SIGN
282 };
283
284 /* Module Loadable parameters. */
285 static unsigned int tx_fifo_num = 1;
286 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288 static unsigned int rx_ring_num = 1;
289 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
291 static unsigned int rts_frm_len[MAX_RX_RINGS] =
292     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293 static unsigned int use_continuous_tx_intrs = 1;
294 static unsigned int rmac_pause_time = 65535;
295 static unsigned int mc_pause_threshold_q0q3 = 187;
296 static unsigned int mc_pause_threshold_q4q7 = 187;
297 static unsigned int shared_splits;
298 static unsigned int tmac_util_period = 5;
299 static unsigned int rmac_util_period = 5;
300 static unsigned int bimodal = 0;
301 #ifndef CONFIG_S2IO_NAPI
302 static unsigned int indicate_max_pkts;
303 #endif
304 /* Frequency of Rx desc syncs expressed as power of 2 */
305 static unsigned int rxsync_frequency = 3;
306
307 /*
308  * S2IO device table.
309  * This table lists all the devices that this driver supports.
310  */
311 static struct pci_device_id s2io_tbl[] __devinitdata = {
312         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
313          PCI_ANY_ID, PCI_ANY_ID},
314         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
315          PCI_ANY_ID, PCI_ANY_ID},
316         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
317          PCI_ANY_ID, PCI_ANY_ID},
318         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
319          PCI_ANY_ID, PCI_ANY_ID},
320         {0,}
321 };
322
323 MODULE_DEVICE_TABLE(pci, s2io_tbl);
324
325 static struct pci_driver s2io_driver = {
326       .name = "S2IO",
327       .id_table = s2io_tbl,
328       .probe = s2io_init_nic,
329       .remove = __devexit_p(s2io_rem_nic),
330 };
331
332 /* A simplifier macro used both by init and free shared_mem Fns(). */
333 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
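/* e.g. a FIFO of 512 TxDLs with 30 lists fitting in a page needs
 * TXD_MEM_PAGE_CNT(512, 30) = (512 + 30 - 1) / 30 = 18 pages.
 */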
334
335 /**
336  * init_shared_mem - Allocation and Initialization of Memory
337  * @nic: Device private variable.
338  * Description: The function allocates all the memory areas shared
339  * between the NIC and the driver. This includes Tx descriptors,
340  * Rx descriptors and the statistics block.
341  */
342
343 static int init_shared_mem(struct s2io_nic *nic)
344 {
345         u32 size;
346         void *tmp_v_addr, *tmp_v_addr_next;
347         dma_addr_t tmp_p_addr, tmp_p_addr_next;
348         RxD_block_t *pre_rxd_blk = NULL;
349         int i, j, blk_cnt, rx_sz, tx_sz;
350         int lst_size, lst_per_page;
351         struct net_device *dev = nic->dev;
352 #ifdef CONFIG_2BUFF_MODE
353         u64 tmp;
354         buffAdd_t *ba;
355 #endif
356
357         mac_info_t *mac_control;
358         struct config_param *config;
359
360         mac_control = &nic->mac_control;
361         config = &nic->config;
362
363
364         /* Allocation and initialization of TXDLs in FIFOs */
365         size = 0;
366         for (i = 0; i < config->tx_fifo_num; i++) {
367                 size += config->tx_cfg[i].fifo_len;
368         }
369         if (size > MAX_AVAILABLE_TXDS) {
370                 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
371                           __FUNCTION__);
372                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
373                 return FAILURE;
374         }
375
376         lst_size = (sizeof(TxD_t) * config->max_txds);
377         tx_sz = lst_size * size;
378         lst_per_page = PAGE_SIZE / lst_size;
379
380         for (i = 0; i < config->tx_fifo_num; i++) {
381                 int fifo_len = config->tx_cfg[i].fifo_len;
382                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
383                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
384                                                           GFP_KERNEL);
385                 if (!mac_control->fifos[i].list_info) {
386                         DBG_PRINT(ERR_DBG,
387                                   "Malloc failed for list_info\n");
388                         return -ENOMEM;
389                 }
390                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
391         }
392         for (i = 0; i < config->tx_fifo_num; i++) {
393                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
394                                                 lst_per_page);
395                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
396                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
397                     config->tx_cfg[i].fifo_len - 1;
398                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
399                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
400                     config->tx_cfg[i].fifo_len - 1;
401                 mac_control->fifos[i].fifo_no = i;
402                 mac_control->fifos[i].nic = nic;
403                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
404
405                 for (j = 0; j < page_num; j++) {
406                         int k = 0;
407                         dma_addr_t tmp_p;
408                         void *tmp_v;
409                         tmp_v = pci_alloc_consistent(nic->pdev,
410                                                      PAGE_SIZE, &tmp_p);
411                         if (!tmp_v) {
412                                 DBG_PRINT(ERR_DBG,
413                                           "pci_alloc_consistent ");
414                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
415                                 return -ENOMEM;
416                         }
417                         while (k < lst_per_page) {
418                                 int l = (j * lst_per_page) + k;
419                                 if (l == config->tx_cfg[i].fifo_len)
420                                         break;
421                                 mac_control->fifos[i].list_info[l].list_virt_addr =
422                                     tmp_v + (k * lst_size);
423                                 mac_control->fifos[i].list_info[l].list_phy_addr =
424                                     tmp_p + (k * lst_size);
425                                 k++;
426                         }
427                 }
428         }
429
430         /* Allocation and initialization of RXDs in Rings */
431         size = 0;
432         for (i = 0; i < config->rx_ring_num; i++) {
433                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
434                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
435                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
436                                   i);
437                         DBG_PRINT(ERR_DBG, "RxDs per Block");
438                         return FAILURE;
439                 }
440                 size += config->rx_cfg[i].num_rxd;
441                 mac_control->rings[i].block_count =
442                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443                 mac_control->rings[i].pkt_cnt =
444                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
445         }
446         size = (size * (sizeof(RxD_t)));
447         rx_sz = size;
448
449         for (i = 0; i < config->rx_ring_num; i++) {
450                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
451                 mac_control->rings[i].rx_curr_get_info.offset = 0;
452                 mac_control->rings[i].rx_curr_get_info.ring_len =
453                     config->rx_cfg[i].num_rxd - 1;
454                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
455                 mac_control->rings[i].rx_curr_put_info.offset = 0;
456                 mac_control->rings[i].rx_curr_put_info.ring_len =
457                     config->rx_cfg[i].num_rxd - 1;
458                 mac_control->rings[i].nic = nic;
459                 mac_control->rings[i].ring_no = i;
460
461                 blk_cnt =
462                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
463                 /*  Allocating all the Rx blocks */
464                 for (j = 0; j < blk_cnt; j++) {
465 #ifndef CONFIG_2BUFF_MODE
466                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
467 #else
468                         size = SIZE_OF_BLOCK;
469 #endif
470                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
471                                                           &tmp_p_addr);
472                         if (tmp_v_addr == NULL) {
473                                 /*
474                                  * In case of failure, free_shared_mem()
475                                  * is called, which should free any
476                                  * memory that was alloced till the
477                                  * failure happened.
478                                  */
479                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
480                                     tmp_v_addr;
481                                 return -ENOMEM;
482                         }
483                         memset(tmp_v_addr, 0, size);
484                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
485                                 tmp_v_addr;
486                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
487                                 tmp_p_addr;
488                 }
489                 /* Interlinking all Rx Blocks */
490                 for (j = 0; j < blk_cnt; j++) {
491                         tmp_v_addr =
492                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
493                         tmp_v_addr_next =
494                                 mac_control->rings[i].rx_blocks[(j + 1) %
495                                               blk_cnt].block_virt_addr;
496                         tmp_p_addr =
497                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
498                         tmp_p_addr_next =
499                                 mac_control->rings[i].rx_blocks[(j + 1) %
500                                               blk_cnt].block_dma_addr;
501
502                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
503                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
504                                                                  * marker.
505                                                                  */
506 #ifndef CONFIG_2BUFF_MODE
507                         pre_rxd_blk->reserved_2_pNext_RxD_block =
508                             (unsigned long) tmp_v_addr_next;
509 #endif
510                         pre_rxd_blk->pNext_RxD_Blk_physical =
511                             (u64) tmp_p_addr_next;
512                 }
513         }
514
515 #ifdef CONFIG_2BUFF_MODE
516         /*
517          * Allocation of Storages for buffer addresses in 2BUFF mode
518          * and the buffers as well.
519          */
520         for (i = 0; i < config->rx_ring_num; i++) {
521                 blk_cnt =
522                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
523                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
524                                      GFP_KERNEL);
525                 if (!mac_control->rings[i].ba)
526                         return -ENOMEM;
527                 for (j = 0; j < blk_cnt; j++) {
528                         int k = 0;
529                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
530                                                  (MAX_RXDS_PER_BLOCK + 1)),
531                                                 GFP_KERNEL);
532                         if (!mac_control->rings[i].ba[j])
533                                 return -ENOMEM;
534                         while (k != MAX_RXDS_PER_BLOCK) {
535                                 ba = &mac_control->rings[i].ba[j][k];
536
537                                 ba->ba_0_org = (void *) kmalloc
538                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
539                                 if (!ba->ba_0_org)
540                                         return -ENOMEM;
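                                /*
                                 * Advance past ALIGN_SIZE and mask to get an
                                 * aligned buffer; ba_0_org keeps the original
                                 * pointer so it can be kfree()d later.
                                 */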
541                                 tmp = (u64) ba->ba_0_org;
542                                 tmp += ALIGN_SIZE;
543                                 tmp &= ~((u64) ALIGN_SIZE);
544                                 ba->ba_0 = (void *) tmp;
545
546                                 ba->ba_1_org = (void *) kmalloc
547                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
548                                 if (!ba->ba_1_org)
549                                         return -ENOMEM;
550                                 tmp = (u64) ba->ba_1_org;
551                                 tmp += ALIGN_SIZE;
552                                 tmp &= ~((u64) ALIGN_SIZE);
553                                 ba->ba_1 = (void *) tmp;
554                                 k++;
555                         }
556                 }
557         }
558 #endif
559
560         /* Allocation and initialization of Statistics block */
561         size = sizeof(StatInfo_t);
562         mac_control->stats_mem = pci_alloc_consistent
563             (nic->pdev, size, &mac_control->stats_mem_phy);
564
565         if (!mac_control->stats_mem) {
566                 /*
567                  * In case of failure, free_shared_mem() is called, which
568                  * should free any memory that was alloced till the
569                  * failure happened.
570                  */
571                 return -ENOMEM;
572         }
573         mac_control->stats_mem_sz = size;
574
575         tmp_v_addr = mac_control->stats_mem;
576         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
577         memset(tmp_v_addr, 0, size);
578         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
579                   (unsigned long long) tmp_p_addr);
580
581         return SUCCESS;
582 }
583
584 /**
585  * free_shared_mem - Free the allocated Memory
586  * @nic:  Device private variable.
587  * Description: This function is to free all memory locations allocated by
588  * the init_shared_mem() function and return it to the kernel.
589  */
590
591 static void free_shared_mem(struct s2io_nic *nic)
592 {
593         int i, j, blk_cnt, size;
594         void *tmp_v_addr;
595         dma_addr_t tmp_p_addr;
596         mac_info_t *mac_control;
597         struct config_param *config;
598         int lst_size, lst_per_page;
599
600
601         if (!nic)
602                 return;
603
604         mac_control = &nic->mac_control;
605         config = &nic->config;
606
607         lst_size = (sizeof(TxD_t) * config->max_txds);
608         lst_per_page = PAGE_SIZE / lst_size;
609
610         for (i = 0; i < config->tx_fifo_num; i++) {
611                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
612                                                 lst_per_page);
613                 for (j = 0; j < page_num; j++) {
614                         int mem_blks = (j * lst_per_page);
615                         if ((!mac_control->fifos[i].list_info) ||
616                                 (!mac_control->fifos[i].list_info[mem_blks].
617                                  list_virt_addr))
618                                 break;
619                         pci_free_consistent(nic->pdev, PAGE_SIZE,
620                                             mac_control->fifos[i].
621                                             list_info[mem_blks].
622                                             list_virt_addr,
623                                             mac_control->fifos[i].
624                                             list_info[mem_blks].
625                                             list_phy_addr);
626                 }
627                 kfree(mac_control->fifos[i].list_info);
628         }
629
630 #ifndef CONFIG_2BUFF_MODE
631         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
632 #else
633         size = SIZE_OF_BLOCK;
634 #endif
635         for (i = 0; i < config->rx_ring_num; i++) {
636                 blk_cnt = mac_control->rings[i].block_count;
637                 for (j = 0; j < blk_cnt; j++) {
638                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
639                                 block_virt_addr;
640                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
641                                 block_dma_addr;
642                         if (tmp_v_addr == NULL)
643                                 break;
644                         pci_free_consistent(nic->pdev, size,
645                                             tmp_v_addr, tmp_p_addr);
646                 }
647         }
648
649 #ifdef CONFIG_2BUFF_MODE
650         /* Freeing buffer storage addresses in 2BUFF mode. */
651         for (i = 0; i < config->rx_ring_num; i++) {
652                 blk_cnt =
653                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
654                 for (j = 0; j < blk_cnt; j++) {
655                         int k = 0;
656                         if (!mac_control->rings[i].ba[j])
657                                 continue;
658                         while (k != MAX_RXDS_PER_BLOCK) {
659                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
660                                 kfree(ba->ba_0_org);
661                                 kfree(ba->ba_1_org);
662                                 k++;
663                         }
664                         kfree(mac_control->rings[i].ba[j]);
665                 }
666                 if (mac_control->rings[i].ba)
667                         kfree(mac_control->rings[i].ba);
668         }
669 #endif
670
671         if (mac_control->stats_mem) {
672                 pci_free_consistent(nic->pdev,
673                                     mac_control->stats_mem_sz,
674                                     mac_control->stats_mem,
675                                     mac_control->stats_mem_phy);
676         }
677 }
678
679 /**
680  * s2io_verify_pci_mode - Return the PCI/PCI-X mode negotiated by the adapter, or -1 if the mode is unknown.
681  */
682
683 static int s2io_verify_pci_mode(nic_t *nic)
684 {
685         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
686         register u64 val64 = 0;
687         int     mode;
688
689         val64 = readq(&bar0->pci_mode);
690         mode = (u8)GET_PCI_MODE(val64);
691
692         if ( val64 & PCI_MODE_UNKNOWN_MODE)
693                 return -1;      /* Unknown PCI mode */
694         return mode;
695 }
696
697
698 /**
699  * s2io_print_pci_mode -
700  */
701 static int s2io_print_pci_mode(nic_t *nic)
702 {
703         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
704         register u64 val64 = 0;
705         int     mode;
706         struct config_param *config = &nic->config;
707
708         val64 = readq(&bar0->pci_mode);
709         mode = (u8)GET_PCI_MODE(val64);
710
711         if ( val64 & PCI_MODE_UNKNOWN_MODE)
712                 return -1;      /* Unknown PCI mode */
713
714         if (val64 & PCI_MODE_32_BITS) {
715                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
716         } else {
717                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
718         }
719
720         switch(mode) {
721                 case PCI_MODE_PCI_33:
722                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
723                         config->bus_speed = 33;
724                         break;
725                 case PCI_MODE_PCI_66:
726                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
727                         config->bus_speed = 133;
728                         break;
729                 case PCI_MODE_PCIX_M1_66:
730                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
731                         config->bus_speed = 133; /* Herc doubles the clock rate */
732                         break;
733                 case PCI_MODE_PCIX_M1_100:
734                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
735                         config->bus_speed = 200;
736                         break;
737                 case PCI_MODE_PCIX_M1_133:
738                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
739                         config->bus_speed = 266;
740                         break;
741                 case PCI_MODE_PCIX_M2_66:
742                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
743                         config->bus_speed = 133;
744                         break;
745                 case PCI_MODE_PCIX_M2_100:
746                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
747                         config->bus_speed = 200;
748                         break;
749                 case PCI_MODE_PCIX_M2_133:
750                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
751                         config->bus_speed = 266;
752                         break;
753                 default:
754                         return -1;      /* Unsupported bus speed */
755         }
756
757         return mode;
758 }
759
760 /**
761  *  init_nic - Initialization of hardware
762  *  @nic: device private variable
763  *  Description: The function sequentially configures every block
764  *  of the H/W from their reset values.
765  *  Return Value:  SUCCESS on success and
766  *  '-1' on failure (endian settings incorrect).
767  */
768
769 static int init_nic(struct s2io_nic *nic)
770 {
771         XENA_dev_config_t __iomem *bar0 = nic->bar0;
772         struct net_device *dev = nic->dev;
773         register u64 val64 = 0;
774         void __iomem *add;
775         u32 time;
776         int i, j;
777         mac_info_t *mac_control;
778         struct config_param *config;
779         int mdio_cnt = 0, dtx_cnt = 0;
780         unsigned long long mem_share;
781         int mem_size;
782
783         mac_control = &nic->mac_control;
784         config = &nic->config;
785
786         /* to set the swapper control on the card */
787         if(s2io_set_swapper(nic)) {
788                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
789                 return -1;
790         }
791
792         /*
793          * Herc requires EOI to be removed from reset before XGXS, so..
794          */
795         if (nic->device_type & XFRAME_II_DEVICE) {
796                 val64 = 0xA500000000ULL;
797                 writeq(val64, &bar0->sw_reset);
798                 msleep(500);
799                 val64 = readq(&bar0->sw_reset);
800         }
801
802         /* Remove XGXS from reset state */
803         val64 = 0;
804         writeq(val64, &bar0->sw_reset);
805         msleep(500);
806         val64 = readq(&bar0->sw_reset);
807
808         /*  Enable Receiving broadcasts */
809         add = &bar0->mac_cfg;
810         val64 = readq(&bar0->mac_cfg);
811         val64 |= MAC_RMAC_BCAST_ENABLE;
812         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813         writel((u32) val64, add);
814         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
815         writel((u32) (val64 >> 32), (add + 4));
816
817         /* Read registers in all blocks */
818         val64 = readq(&bar0->mac_int_mask);
819         val64 = readq(&bar0->mc_int_mask);
820         val64 = readq(&bar0->xgxs_int_mask);
821
822         /*  Set MTU */
823         val64 = dev->mtu;
824         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
825
826         /*
827          * Configuring the XAUI Interface of Xena.
828          * ***************************************
829          * To Configure the Xena's XAUI, one has to write a series
830          * of 64 bit values into two registers in a particular
831          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
832          * which will be defined in the array of configuration values
833          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
834          * to switch writing from one register to another. We continue
835          * writing these values until we encounter the 'END_SIGN' macro.
836          * For example, after making a series of 21 writes into
837          * dtx_control register the 'SWITCH_SIGN' appears and hence we
838          * start writing into mdio_control until we encounter END_SIGN.
839          */
840         if (nic->device_type & XFRAME_II_DEVICE) {
841                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
842                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
843                                           &bar0->dtx_control, UF);
844                         if (dtx_cnt & 0x1)
845                                 msleep(1); /* Necessary!! */
846                         dtx_cnt++;
847                 }
848         } else {
849                 while (1) {
850                       dtx_cfg:
851                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
852                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
853                                         dtx_cnt++;
854                                         goto mdio_cfg;
855                                 }
856                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
857                                                   &bar0->dtx_control, UF);
858                                 val64 = readq(&bar0->dtx_control);
859                                 dtx_cnt++;
860                         }
861                       mdio_cfg:
862                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
863                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
864                                         mdio_cnt++;
865                                         goto dtx_cfg;
866                                 }
867                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
868                                                   &bar0->mdio_control, UF);
869                                 val64 = readq(&bar0->mdio_control);
870                                 mdio_cnt++;
871                         }
872                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
873                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
874                                 break;
875                         } else {
876                                 goto dtx_cfg;
877                         }
878                 }
879         }
880
881         /*  Tx DMA Initialization */
882         val64 = 0;
883         writeq(val64, &bar0->tx_fifo_partition_0);
884         writeq(val64, &bar0->tx_fifo_partition_1);
885         writeq(val64, &bar0->tx_fifo_partition_2);
886         writeq(val64, &bar0->tx_fifo_partition_3);
887
888
889         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
890                 val64 |=
891                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
892                          13) | vBIT(config->tx_cfg[i].fifo_priority,
893                                     ((i * 32) + 5), 3);
894
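                /*
                 * Each tx_fifo_partition register holds the settings for two
                 * FIFOs.  If the last FIFO lands on an even index, bump i so
                 * the switch below still writes out the partially filled
                 * partition register.
                 */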
895                 if (i == (config->tx_fifo_num - 1)) {
896                         if (i % 2 == 0)
897                                 i++;
898                 }
899
900                 switch (i) {
901                 case 1:
902                         writeq(val64, &bar0->tx_fifo_partition_0);
903                         val64 = 0;
904                         break;
905                 case 3:
906                         writeq(val64, &bar0->tx_fifo_partition_1);
907                         val64 = 0;
908                         break;
909                 case 5:
910                         writeq(val64, &bar0->tx_fifo_partition_2);
911                         val64 = 0;
912                         break;
913                 case 7:
914                         writeq(val64, &bar0->tx_fifo_partition_3);
915                         break;
916                 }
917         }
918
919         /* Enable Tx FIFO partition 0. */
920         val64 = readq(&bar0->tx_fifo_partition_0);
921         val64 |= BIT(0);        /* To enable the FIFO partition. */
922         writeq(val64, &bar0->tx_fifo_partition_0);
923
924         /*
925          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
926          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
927          */
928         if ((nic->device_type == XFRAME_I_DEVICE) &&
929                 (get_xena_rev_id(nic->pdev) < 4))
930                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
931
932         val64 = readq(&bar0->tx_fifo_partition_0);
933         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
934                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
935
936         /*
937          * Initialization of Tx_PA_CONFIG register to ignore packet
938          * integrity checking.
939          */
940         val64 = readq(&bar0->tx_pa_cfg);
941         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
942             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
943         writeq(val64, &bar0->tx_pa_cfg);
944
945         /* Rx DMA initialization. */
946         val64 = 0;
947         for (i = 0; i < config->rx_ring_num; i++) {
948                 val64 |=
949                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
950                          3);
951         }
952         writeq(val64, &bar0->rx_queue_priority);
953
954         /*
955          * Allocating equal share of memory to all the
956          * configured Rings.
957          */
958         val64 = 0;
959         if (nic->device_type & XFRAME_II_DEVICE)
960                 mem_size = 32;
961         else
962                 mem_size = 64;
963
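        /*
         * e.g. with mem_size = 64 and rx_ring_num = 3, queue 0 gets
         * 64 / 3 + 64 % 3 = 22 shares while queues 1 and 2 get 21 each,
         * so any remainder always goes to queue 0.
         */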
964         for (i = 0; i < config->rx_ring_num; i++) {
965                 switch (i) {
966                 case 0:
967                         mem_share = (mem_size / config->rx_ring_num +
968                                      mem_size % config->rx_ring_num);
969                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
970                         continue;
971                 case 1:
972                         mem_share = (mem_size / config->rx_ring_num);
973                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
974                         continue;
975                 case 2:
976                         mem_share = (mem_size / config->rx_ring_num);
977                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
978                         continue;
979                 case 3:
980                         mem_share = (mem_size / config->rx_ring_num);
981                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
982                         continue;
983                 case 4:
984                         mem_share = (mem_size / config->rx_ring_num);
985                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
986                         continue;
987                 case 5:
988                         mem_share = (mem_size / config->rx_ring_num);
989                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
990                         continue;
991                 case 6:
992                         mem_share = (mem_size / config->rx_ring_num);
993                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
994                         continue;
995                 case 7:
996                         mem_share = (mem_size / config->rx_ring_num);
997                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
998                         continue;
999                 }
1000         }
1001         writeq(val64, &bar0->rx_queue_cfg);
1002
1003         /*
1004          * Filling Tx round robin registers
1005          * as per the number of FIFOs
1006          */
1007         switch (config->tx_fifo_num) {
1008         case 1:
1009                 val64 = 0x0000000000000000ULL;
1010                 writeq(val64, &bar0->tx_w_round_robin_0);
1011                 writeq(val64, &bar0->tx_w_round_robin_1);
1012                 writeq(val64, &bar0->tx_w_round_robin_2);
1013                 writeq(val64, &bar0->tx_w_round_robin_3);
1014                 writeq(val64, &bar0->tx_w_round_robin_4);
1015                 break;
1016         case 2:
1017                 val64 = 0x0000010000010000ULL;
1018                 writeq(val64, &bar0->tx_w_round_robin_0);
1019                 val64 = 0x0100000100000100ULL;
1020                 writeq(val64, &bar0->tx_w_round_robin_1);
1021                 val64 = 0x0001000001000001ULL;
1022                 writeq(val64, &bar0->tx_w_round_robin_2);
1023                 val64 = 0x0000010000010000ULL;
1024                 writeq(val64, &bar0->tx_w_round_robin_3);
1025                 val64 = 0x0100000000000000ULL;
1026                 writeq(val64, &bar0->tx_w_round_robin_4);
1027                 break;
1028         case 3:
1029                 val64 = 0x0001000102000001ULL;
1030                 writeq(val64, &bar0->tx_w_round_robin_0);
1031                 val64 = 0x0001020000010001ULL;
1032                 writeq(val64, &bar0->tx_w_round_robin_1);
1033                 val64 = 0x0200000100010200ULL;
1034                 writeq(val64, &bar0->tx_w_round_robin_2);
1035                 val64 = 0x0001000102000001ULL;
1036                 writeq(val64, &bar0->tx_w_round_robin_3);
1037                 val64 = 0x0001020000000000ULL;
1038                 writeq(val64, &bar0->tx_w_round_robin_4);
1039                 break;
1040         case 4:
1041                 val64 = 0x0001020300010200ULL;
1042                 writeq(val64, &bar0->tx_w_round_robin_0);
1043                 val64 = 0x0100000102030001ULL;
1044                 writeq(val64, &bar0->tx_w_round_robin_1);
1045                 val64 = 0x0200010000010203ULL;
1046                 writeq(val64, &bar0->tx_w_round_robin_2);
1047                 val64 = 0x0001020001000001ULL;
1048                 writeq(val64, &bar0->tx_w_round_robin_3);
1049                 val64 = 0x0203000100000000ULL;
1050                 writeq(val64, &bar0->tx_w_round_robin_4);
1051                 break;
1052         case 5:
1053                 val64 = 0x0001000203000102ULL;
1054                 writeq(val64, &bar0->tx_w_round_robin_0);
1055                 val64 = 0x0001020001030004ULL;
1056                 writeq(val64, &bar0->tx_w_round_robin_1);
1057                 val64 = 0x0001000203000102ULL;
1058                 writeq(val64, &bar0->tx_w_round_robin_2);
1059                 val64 = 0x0001020001030004ULL;
1060                 writeq(val64, &bar0->tx_w_round_robin_3);
1061                 val64 = 0x0001000000000000ULL;
1062                 writeq(val64, &bar0->tx_w_round_robin_4);
1063                 break;
1064         case 6:
1065                 val64 = 0x0001020304000102ULL;
1066                 writeq(val64, &bar0->tx_w_round_robin_0);
1067                 val64 = 0x0304050001020001ULL;
1068                 writeq(val64, &bar0->tx_w_round_robin_1);
1069                 val64 = 0x0203000100000102ULL;
1070                 writeq(val64, &bar0->tx_w_round_robin_2);
1071                 val64 = 0x0304000102030405ULL;
1072                 writeq(val64, &bar0->tx_w_round_robin_3);
1073                 val64 = 0x0001000200000000ULL;
1074                 writeq(val64, &bar0->tx_w_round_robin_4);
1075                 break;
1076         case 7:
1077                 val64 = 0x0001020001020300ULL;
1078                 writeq(val64, &bar0->tx_w_round_robin_0);
1079                 val64 = 0x0102030400010203ULL;
1080                 writeq(val64, &bar0->tx_w_round_robin_1);
1081                 val64 = 0x0405060001020001ULL;
1082                 writeq(val64, &bar0->tx_w_round_robin_2);
1083                 val64 = 0x0304050000010200ULL;
1084                 writeq(val64, &bar0->tx_w_round_robin_3);
1085                 val64 = 0x0102030000000000ULL;
1086                 writeq(val64, &bar0->tx_w_round_robin_4);
1087                 break;
1088         case 8:
1089                 val64 = 0x0001020300040105ULL;
1090                 writeq(val64, &bar0->tx_w_round_robin_0);
1091                 val64 = 0x0200030106000204ULL;
1092                 writeq(val64, &bar0->tx_w_round_robin_1);
1093                 val64 = 0x0103000502010007ULL;
1094                 writeq(val64, &bar0->tx_w_round_robin_2);
1095                 val64 = 0x0304010002060500ULL;
1096                 writeq(val64, &bar0->tx_w_round_robin_3);
1097                 val64 = 0x0103020400000000ULL;
1098                 writeq(val64, &bar0->tx_w_round_robin_4);
1099                 break;
1100         }
1101
1102         /* Filling the Rx round robin registers as per the
1103          * number of Rings and steering based on QoS.
1104          */
1105         switch (config->rx_ring_num) {
1106         case 1:
1107                 val64 = 0x8080808080808080ULL;
1108                 writeq(val64, &bar0->rts_qos_steering);
1109                 break;
1110         case 2:
1111                 val64 = 0x0000010000010000ULL;
1112                 writeq(val64, &bar0->rx_w_round_robin_0);
1113                 val64 = 0x0100000100000100ULL;
1114                 writeq(val64, &bar0->rx_w_round_robin_1);
1115                 val64 = 0x0001000001000001ULL;
1116                 writeq(val64, &bar0->rx_w_round_robin_2);
1117                 val64 = 0x0000010000010000ULL;
1118                 writeq(val64, &bar0->rx_w_round_robin_3);
1119                 val64 = 0x0100000000000000ULL;
1120                 writeq(val64, &bar0->rx_w_round_robin_4);
1121
1122                 val64 = 0x8080808040404040ULL;
1123                 writeq(val64, &bar0->rts_qos_steering);
1124                 break;
1125         case 3:
1126                 val64 = 0x0001000102000001ULL;
1127                 writeq(val64, &bar0->rx_w_round_robin_0);
1128                 val64 = 0x0001020000010001ULL;
1129                 writeq(val64, &bar0->rx_w_round_robin_1);
1130                 val64 = 0x0200000100010200ULL;
1131                 writeq(val64, &bar0->rx_w_round_robin_2);
1132                 val64 = 0x0001000102000001ULL;
1133                 writeq(val64, &bar0->rx_w_round_robin_3);
1134                 val64 = 0x0001020000000000ULL;
1135                 writeq(val64, &bar0->rx_w_round_robin_4);
1136
1137                 val64 = 0x8080804040402020ULL;
1138                 writeq(val64, &bar0->rts_qos_steering);
1139                 break;
1140         case 4:
1141                 val64 = 0x0001020300010200ULL;
1142                 writeq(val64, &bar0->rx_w_round_robin_0);
1143                 val64 = 0x0100000102030001ULL;
1144                 writeq(val64, &bar0->rx_w_round_robin_1);
1145                 val64 = 0x0200010000010203ULL;
1146                 writeq(val64, &bar0->rx_w_round_robin_2);
1147                 val64 = 0x0001020001000001ULL;  
1148                 writeq(val64, &bar0->rx_w_round_robin_3);
1149                 val64 = 0x0203000100000000ULL;
1150                 writeq(val64, &bar0->rx_w_round_robin_4);
1151
1152                 val64 = 0x8080404020201010ULL;
1153                 writeq(val64, &bar0->rts_qos_steering);
1154                 break;
1155         case 5:
1156                 val64 = 0x0001000203000102ULL;
1157                 writeq(val64, &bar0->rx_w_round_robin_0);
1158                 val64 = 0x0001020001030004ULL;
1159                 writeq(val64, &bar0->rx_w_round_robin_1);
1160                 val64 = 0x0001000203000102ULL;
1161                 writeq(val64, &bar0->rx_w_round_robin_2);
1162                 val64 = 0x0001020001030004ULL;
1163                 writeq(val64, &bar0->rx_w_round_robin_3);
1164                 val64 = 0x0001000000000000ULL;
1165                 writeq(val64, &bar0->rx_w_round_robin_4);
1166
1167                 val64 = 0x8080404020201008ULL;
1168                 writeq(val64, &bar0->rts_qos_steering);
1169                 break;
1170         case 6:
1171                 val64 = 0x0001020304000102ULL;
1172                 writeq(val64, &bar0->rx_w_round_robin_0);
1173                 val64 = 0x0304050001020001ULL;
1174                 writeq(val64, &bar0->rx_w_round_robin_1);
1175                 val64 = 0x0203000100000102ULL;
1176                 writeq(val64, &bar0->rx_w_round_robin_2);
1177                 val64 = 0x0304000102030405ULL;
1178                 writeq(val64, &bar0->rx_w_round_robin_3);
1179                 val64 = 0x0001000200000000ULL;
1180                 writeq(val64, &bar0->rx_w_round_robin_4);
1181
1182                 val64 = 0x8080404020100804ULL;
1183                 writeq(val64, &bar0->rts_qos_steering);
1184                 break;
1185         case 7:
1186                 val64 = 0x0001020001020300ULL;
1187                 writeq(val64, &bar0->rx_w_round_robin_0);
1188                 val64 = 0x0102030400010203ULL;
1189                 writeq(val64, &bar0->rx_w_round_robin_1);
1190                 val64 = 0x0405060001020001ULL;
1191                 writeq(val64, &bar0->rx_w_round_robin_2);
1192                 val64 = 0x0304050000010200ULL;
1193                 writeq(val64, &bar0->rx_w_round_robin_3);
1194                 val64 = 0x0102030000000000ULL;
1195                 writeq(val64, &bar0->rx_w_round_robin_4);
1196
1197                 val64 = 0x8080402010080402ULL;
1198                 writeq(val64, &bar0->rts_qos_steering);
1199                 break;
1200         case 8:
1201                 val64 = 0x0001020300040105ULL;
1202                 writeq(val64, &bar0->rx_w_round_robin_0);
1203                 val64 = 0x0200030106000204ULL;
1204                 writeq(val64, &bar0->rx_w_round_robin_1);
1205                 val64 = 0x0103000502010007ULL;
1206                 writeq(val64, &bar0->rx_w_round_robin_2);
1207                 val64 = 0x0304010002060500ULL;
1208                 writeq(val64, &bar0->rx_w_round_robin_3);
1209                 val64 = 0x0103020400000000ULL;
1210                 writeq(val64, &bar0->rx_w_round_robin_4);
1211
1212                 val64 = 0x8040201008040201ULL;
1213                 writeq(val64, &bar0->rts_qos_steering);
1214                 break;
1215         }
1216
1217         /* UDP Fix */
1218         val64 = 0;
1219         for (i = 0; i < 8; i++)
1220                 writeq(val64, &bar0->rts_frm_len_n[i]);
1221
1222         /* Set the default rts frame length for the rings configured */
1223         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1224         for (i = 0 ; i < config->rx_ring_num ; i++)
1225                 writeq(val64, &bar0->rts_frm_len_n[i]);
1226
1227         /* Set the frame length for the configured rings
1228          * desired by the user
1229          */
1230         for (i = 0; i < config->rx_ring_num; i++) {
1231                 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
1232                  * specified any frame length steering for this ring.
1233                  * If the user provides a frame length then program
1234                  * the rts_frm_len register with that value, or else
1235                  * leave it as it is.
1236                  */
1237                 if (rts_frm_len[i] != 0) {
1238                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1239                                 &bar0->rts_frm_len_n[i]);
1240                 }
1241         }
1242
1243         /* Program statistics memory */
1244         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1245
1246         if (nic->device_type == XFRAME_II_DEVICE) {
1247                 val64 = STAT_BC(0x320);
1248                 writeq(val64, &bar0->stat_byte_cnt);
1249         }
1250
1251         /*
1252          * Initializing the sampling rate for the device to calculate the
1253          * bandwidth utilization.
1254          */
1255         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1256             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1257         writeq(val64, &bar0->mac_link_util);
1258
1259
1260         /*
1261          * Initializing the Transmit and Receive Traffic Interrupt
1262          * Scheme.
1263          */
1264         /*
1265          * TTI Initialization. Default Tx timer gets us about
1266          * 250 interrupts per sec. Continuous interrupts are enabled
1267          * by default.
1268          */
1269         if (nic->device_type == XFRAME_II_DEVICE) {
1270                 int count = (nic->config.bus_speed * 125)/2;
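                /*
                 * e.g. with config->bus_speed = 133 this programs a timer
                 * value of (133 * 125) / 2 = 8312 (integer division).
                 */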
1271                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1272         } else {
1273
1274                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1275         }
1276         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1277             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1278             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1279                 if (use_continuous_tx_intrs)
1280                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1281         writeq(val64, &bar0->tti_data1_mem);
1282
1283         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1284             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1285             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1286         writeq(val64, &bar0->tti_data2_mem);
1287
1288         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1289         writeq(val64, &bar0->tti_command_mem);
1290
1291         /*
1292          * Once the operation completes, the Strobe bit of the command
1293          * register will be reset. We poll for this particular condition.
1294          * We wait for a maximum of 500ms for the operation to complete;
1295          * if it's not complete by then we return an error.
1296          */
1297         time = 0;
1298         while (TRUE) {
1299                 val64 = readq(&bar0->tti_command_mem);
1300                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1301                         break;
1302                 }
1303                 if (time > 10) {
1304                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1305                                   dev->name);
1306                         return -1;
1307                 }
1308                 msleep(50);
1309                 time++;
1310         }
1311
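/*
 * The write-command-then-poll-strobe sequence above is repeated for the
 * bimodal TTI and the RTI programming below.  A hypothetical helper (not
 * part of this driver) sketching the common pattern, giving up after
 * roughly 500ms in 50ms steps:
 *
 *	static int wait_for_cmd_strobe(u64 __iomem *cmd_reg, u64 strobe)
 *	{
 *		int time = 0;
 *
 *		while (readq(cmd_reg) & strobe) {
 *			if (time++ > 10)
 *				return -1;
 *			msleep(50);
 *		}
 *		return 0;
 *	}
 *
 * which a caller would use as, e.g.:
 *
 *	if (wait_for_cmd_strobe(&bar0->tti_command_mem,
 *				TTI_CMD_MEM_STROBE_NEW_CMD))
 *		return -1;
 */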
1312         if (nic->config.bimodal) {
1313                 int k = 0;
1314                 for (k = 0; k < config->rx_ring_num; k++) {
1315                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1316                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1317                         writeq(val64, &bar0->tti_command_mem);
1318
1319                         /*
1320                          * Once the operation completes, the Strobe bit of
1321                          * the command register will be reset. We poll for
1322                          * this condition for a maximum of 500ms; if it's
1323                          * not complete by then we return an error.
1324                          */
1325                         time = 0;
1326                         while (TRUE) {
1327                                 val64 = readq(&bar0->tti_command_mem);
1328                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1329                                         break;
1330                                 }
1331                                 if (time > 10) {
1332                                         DBG_PRINT(ERR_DBG,
1333                                                 "%s: TTI init Failed\n",
1334                                         dev->name);
1335                                         return -1;
1336                                 }
1337                                 time++;
1338                                 msleep(50);
1339                         }
1340                 }
1341         } else {
1342
1343                 /* RTI Initialization */
1344                 if (nic->device_type == XFRAME_II_DEVICE) {
1345                         /*
1346                          * Programmed to generate approximately 500
1347                          * interrupts per second.
1348                          */
1349                         int count = (nic->config.bus_speed * 125)/4;
1350                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1351                 } else {
1352                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1353                 }
1354                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1355                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1356                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1357
1358                 writeq(val64, &bar0->rti_data1_mem);
1359
1360                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1361                     RTI_DATA2_MEM_RX_UFC_B(0x2) |
1362                     RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1363                 writeq(val64, &bar0->rti_data2_mem);
1364
1365                 for (i = 0; i < config->rx_ring_num; i++) {
1366                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1367                                         | RTI_CMD_MEM_OFFSET(i);
1368                         writeq(val64, &bar0->rti_command_mem);
1369
1370                         /*
1371                          * Once the operation completes, the Strobe bit of the
1372                          * command register will be reset. We poll for this
1373                          * particular condition. We wait for a maximum of 500ms
1374                          * for the operation to complete; if it's not complete
1375                          * by then we return an error.
1376                          */
1377                         time = 0;
1378                         while (TRUE) {
1379                                 val64 = readq(&bar0->rti_command_mem);
1380                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1381                                         break;
1382                                 }
1383                                 if (time > 10) {
1384                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1385                                                   dev->name);
1386                                         return -1;
1387                                 }
1388                                 time++;
1389                                 msleep(50);
1390                         }
1391                 }
1392         }
1393
1394         /*
1395          * Initializing proper Pause threshold values into all
1396          * the 8 Queues on the Rx side.
1397          */
1398         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1399         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1400
1401         /* Disable RMAC PAD STRIPPING */
1402         add = (void *) &bar0->mac_cfg;
1403         val64 = readq(&bar0->mac_cfg);
1404         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1405         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406         writel((u32) (val64), add);
1407         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1408         writel((u32) (val64 >> 32), (add + 4));
1409         val64 = readq(&bar0->mac_cfg);
1410
1411         /*
1412          * Set the time value to be inserted in the pause frame
1413          * generated by xena.
1414          */
1415         val64 = readq(&bar0->rmac_pause_cfg);
1416         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1417         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1418         writeq(val64, &bar0->rmac_pause_cfg);
1419
1420         /*
1421          * Set the Threshold Limit for generating pause frames.
1422          * If the amount of data in any Queue exceeds the ratio
1423          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
1424          * pause frame is generated.
1425          */
1426         val64 = 0;
1427         for (i = 0; i < 4; i++) {
1428                 val64 |=
1429                     (((u64) 0xFF00 | nic->mac_control.
1430                       mc_pause_threshold_q0q3)
1431                      << (i * 2 * 8));
1432         }
1433         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1434
1435         val64 = 0;
1436         for (i = 0; i < 4; i++) {
1437                 val64 |=
1438                     (((u64) 0xFF00 | nic->mac_control.
1439                       mc_pause_threshold_q4q7)
1440                      << (i * 2 * 8));
1441         }
1442         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1443
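/*
 * Each queue gets a 16-bit field in the packed register value: the high byte
 * is fixed at 0xFF and the low byte is the per-queue threshold.  For example,
 * with a per-queue threshold of 0xbb (the same value as in the initial
 * 0xffbbffbbffbbffbbULL programmed above), each of the two loops builds:
 *
 *	(0xFF00 | 0xbb) << 0  | (0xFF00 | 0xbb) << 16 |
 *	(0xFF00 | 0xbb) << 32 | (0xFF00 | 0xbb) << 48 == 0xffbbffbbffbbffbbULL
 */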
1444         /*
1445          * TxDMA will stop issuing Read requests if the number of read
1446          * splits exceeds the limit set by shared_splits.
1447          */
1448         val64 = readq(&bar0->pic_control);
1449         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1450         writeq(val64, &bar0->pic_control);
1451
1452         /*
1453          * Programming the Herc to split every write transaction
1454          * that does not start on an ADB to reduce disconnects.
1455          */
1456         if (nic->device_type == XFRAME_II_DEVICE) {
1457                 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1458                 writeq(val64, &bar0->wreq_split_mask);
1459         }
1460
1461         /* Setting Link stability period to 64 ms */ 
1462         if (nic->device_type == XFRAME_II_DEVICE) {
1463                 val64 = MISC_LINK_STABILITY_PRD(3);
1464                 writeq(val64, &bar0->misc_control);
1465         }
1466
1467         return SUCCESS;
1468 }
1469 #define LINK_UP_DOWN_INTERRUPT          1
1470 #define MAC_RMAC_ERR_TIMER              2
1471
1472 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1473 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1474 #else
1475 int s2io_link_fault_indication(nic_t *nic)
1476 {
1477         if (nic->device_type == XFRAME_II_DEVICE)
1478                 return LINK_UP_DOWN_INTERRUPT;
1479         else
1480                 return MAC_RMAC_ERR_TIMER;
1481 }
1482 #endif
1483
1484 /**
1485  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1486  *  @nic: device private variable.
1487  *  @mask: A mask indicating which Intr blocks must be modified.
1488  *  @flag: A flag indicating whether to enable or disable the Intrs.
1489  *  Description: This function will either disable or enable the interrupts
1490  *  depending on the flag argument. The mask argument can be used to
1491  *  enable/disable any Intr block.
1492  *  Return Value: NONE.
1493  */
1494
1495 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1496 {
1497         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1498         register u64 val64 = 0, temp64 = 0;
1499
1500         /*  Top level interrupt classification */
1501         /*  PIC Interrupts */
1502         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1503                 /*  Enable PIC Intrs in the general intr mask register */
1504                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1505                 if (flag == ENABLE_INTRS) {
1506                         temp64 = readq(&bar0->general_int_mask);
1507                         temp64 &= ~((u64) val64);
1508                         writeq(temp64, &bar0->general_int_mask);
1509                         /*
1510                          * If this is a Hercules adapter, enable the GPIO
1511                          * interrupt; otherwise keep all PCIX, Flash, MDIO,
1512                          * IIC and GPIO interrupts disabled for now.
1513                          * TODO
1514                          */
1515                         if (s2io_link_fault_indication(nic) ==
1516                                         LINK_UP_DOWN_INTERRUPT ) {
1517                                 temp64 = readq(&bar0->pic_int_mask);
1518                                 temp64 &= ~((u64) PIC_INT_GPIO);
1519                                 writeq(temp64, &bar0->pic_int_mask);
1520                                 temp64 = readq(&bar0->gpio_int_mask);
1521                                 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1522                                 writeq(temp64, &bar0->gpio_int_mask);
1523                         } else {
1524                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1525                         }
1526                         /*
1527                          * No MSI Support is available presently, so TTI and
1528                          * RTI interrupts are also disabled.
1529                          */
1530                 } else if (flag == DISABLE_INTRS) {
1531                         /*
1532                          * Disable PIC Intrs in the general
1533                          * intr mask register
1534                          */
1535                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1536                         temp64 = readq(&bar0->general_int_mask);
1537                         val64 |= temp64;
1538                         writeq(val64, &bar0->general_int_mask);
1539                 }
1540         }
1541
1542         /*  DMA Interrupts */
1543         /*  Enabling/Disabling Tx DMA interrupts */
1544         if (mask & TX_DMA_INTR) {
1545                 /* Enable TxDMA Intrs in the general intr mask register */
1546                 val64 = TXDMA_INT_M;
1547                 if (flag == ENABLE_INTRS) {
1548                         temp64 = readq(&bar0->general_int_mask);
1549                         temp64 &= ~((u64) val64);
1550                         writeq(temp64, &bar0->general_int_mask);
1551                         /*
1552                          * Keep all interrupts other than PFC interrupt
1553                          * and PCC interrupt disabled in DMA level.
1554                          */
1555                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1556                                                       TXDMA_PCC_INT_M);
1557                         writeq(val64, &bar0->txdma_int_mask);
1558                         /*
1559                          * Enable only the MISC error 1 interrupt in PFC block
1560                          */
1561                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1562                         writeq(val64, &bar0->pfc_err_mask);
1563                         /*
1564                          * Enable only the FB_ECC error interrupt in PCC block
1565                          */
1566                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1567                         writeq(val64, &bar0->pcc_err_mask);
1568                 } else if (flag == DISABLE_INTRS) {
1569                         /*
1570                          * Disable TxDMA Intrs in the general intr mask
1571                          * register
1572                          */
1573                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1574                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1575                         temp64 = readq(&bar0->general_int_mask);
1576                         val64 |= temp64;
1577                         writeq(val64, &bar0->general_int_mask);
1578                 }
1579         }
1580
1581         /*  Enabling/Disabling Rx DMA interrupts */
1582         if (mask & RX_DMA_INTR) {
1583                 /*  Enable RxDMA Intrs in the general intr mask register */
1584                 val64 = RXDMA_INT_M;
1585                 if (flag == ENABLE_INTRS) {
1586                         temp64 = readq(&bar0->general_int_mask);
1587                         temp64 &= ~((u64) val64);
1588                         writeq(temp64, &bar0->general_int_mask);
1589                         /*
1590                          * All RxDMA block interrupts are disabled for now
1591                          * TODO
1592                          */
1593                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1594                 } else if (flag == DISABLE_INTRS) {
1595                         /*
1596                          * Disable RxDMA Intrs in the general intr mask
1597                          * register
1598                          */
1599                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1600                         temp64 = readq(&bar0->general_int_mask);
1601                         val64 |= temp64;
1602                         writeq(val64, &bar0->general_int_mask);
1603                 }
1604         }
1605
1606         /*  MAC Interrupts */
1607         /*  Enabling/Disabling MAC interrupts */
1608         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1609                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1610                 if (flag == ENABLE_INTRS) {
1611                         temp64 = readq(&bar0->general_int_mask);
1612                         temp64 &= ~((u64) val64);
1613                         writeq(temp64, &bar0->general_int_mask);
1614                         /*
1615                          * All MAC block error interrupts are disabled for now
1616                          * TODO
1617                          */
1618                 } else if (flag == DISABLE_INTRS) {
1619                         /*
1620                          * Disable MAC Intrs in the general intr mask register
1621                          */
1622                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1623                         writeq(DISABLE_ALL_INTRS,
1624                                &bar0->mac_rmac_err_mask);
1625
1626                         temp64 = readq(&bar0->general_int_mask);
1627                         val64 |= temp64;
1628                         writeq(val64, &bar0->general_int_mask);
1629                 }
1630         }
1631
1632         /*  XGXS Interrupts */
1633         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1634                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1635                 if (flag == ENABLE_INTRS) {
1636                         temp64 = readq(&bar0->general_int_mask);
1637                         temp64 &= ~((u64) val64);
1638                         writeq(temp64, &bar0->general_int_mask);
1639                         /*
1640                          * All XGXS block error interrupts are disabled for now
1641                          * TODO
1642                          */
1643                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1644                 } else if (flag == DISABLE_INTRS) {
1645                         /*
1646                          * Disable XGXS Intrs in the general intr mask register
1647                          */
1648                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1649                         temp64 = readq(&bar0->general_int_mask);
1650                         val64 |= temp64;
1651                         writeq(val64, &bar0->general_int_mask);
1652                 }
1653         }
1654
1655         /*  Memory Controller(MC) interrupts */
1656         if (mask & MC_INTR) {
1657                 val64 = MC_INT_M;
1658                 if (flag == ENABLE_INTRS) {
1659                         temp64 = readq(&bar0->general_int_mask);
1660                         temp64 &= ~((u64) val64);
1661                         writeq(temp64, &bar0->general_int_mask);
1662                         /*
1663                          * Enable all MC Intrs.
1664                          */
1665                         writeq(0x0, &bar0->mc_int_mask);
1666                         writeq(0x0, &bar0->mc_err_mask);
1667                 } else if (flag == DISABLE_INTRS) {
1668                         /*
1669                          * Disable MC Intrs in the general intr mask register
1670                          */
1671                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1672                         temp64 = readq(&bar0->general_int_mask);
1673                         val64 |= temp64;
1674                         writeq(val64, &bar0->general_int_mask);
1675                 }
1676         }
1677
1678
1679         /*  Tx traffic interrupts */
1680         if (mask & TX_TRAFFIC_INTR) {
1681                 val64 = TXTRAFFIC_INT_M;
1682                 if (flag == ENABLE_INTRS) {
1683                         temp64 = readq(&bar0->general_int_mask);
1684                         temp64 &= ~((u64) val64);
1685                         writeq(temp64, &bar0->general_int_mask);
1686                         /*
1687                          * Enable all the Tx side interrupts
1688                          * writing 0 Enables all 64 TX interrupt levels
1689                          */
1690                         writeq(0x0, &bar0->tx_traffic_mask);
1691                 } else if (flag == DISABLE_INTRS) {
1692                         /*
1693                          * Disable Tx Traffic Intrs in the general intr mask
1694                          * register.
1695                          */
1696                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1697                         temp64 = readq(&bar0->general_int_mask);
1698                         val64 |= temp64;
1699                         writeq(val64, &bar0->general_int_mask);
1700                 }
1701         }
1702
1703         /*  Rx traffic interrupts */
1704         if (mask & RX_TRAFFIC_INTR) {
1705                 val64 = RXTRAFFIC_INT_M;
1706                 if (flag == ENABLE_INTRS) {
1707                         temp64 = readq(&bar0->general_int_mask);
1708                         temp64 &= ~((u64) val64);
1709                         writeq(temp64, &bar0->general_int_mask);
1710                         /* writing 0 Enables all 8 RX interrupt levels */
1711                         writeq(0x0, &bar0->rx_traffic_mask);
1712                 } else if (flag == DISABLE_INTRS) {
1713                         /*
1714                          * Disable Rx Traffic Intrs in the general intr mask
1715                          * register.
1716                          */
1717                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1718                         temp64 = readq(&bar0->general_int_mask);
1719                         val64 |= temp64;
1720                         writeq(val64, &bar0->general_int_mask);
1721                 }
1722         }
1723 }
1724
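/*
 * Usage sketch (mirroring how start_nic() and stop_nic() below drive this
 * function): build a mask of the interrupt blocks of interest and pass
 * ENABLE_INTRS or DISABLE_INTRS as the flag, e.g.
 *
 *	u16 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
 *
 *	en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
 *	...
 *	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
 */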
1725 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1726 {
1727         int ret = 0;
1728
1729         if (flag == FALSE) {
1730                 if ((!herc && (rev_id >= 4)) || herc) {
1731                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1732                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1733                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1734                                 ret = 1;
1735                         }
1736                 } else {
1737                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1738                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1739                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1740                                 ret = 1;
1741                         }
1742                 }
1743         } else {
1744                 if ((!herc && (rev_id >= 4)) || herc) {
1745                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1746                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1747                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1748                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1749                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1750                                 ret = 1;
1751                         }
1752                 } else {
1753                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1754                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1755                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1756                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1757                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1758                                 ret = 1;
1759                         }
1760                 }
1761         }
1762
1763         return ret;
1764 }
1765 /**
1766  *  verify_xena_quiescence - Checks whether the H/W is ready
1767  *  @val64 :  Value read from adapter status register.
1768  *  @flag : indicates if the adapter enable bit was ever written once
1769  *  before.
1770  *  Description: Returns whether the H/W is ready to go or not. Depending
1771  *  on whether adapter enable bit was written or not the comparison
1772  *  differs and the calling function passes the input argument flag to
1773  *  indicate this.
1774  *  Return: 1 if Xena is quiescent
1775  *          0 if Xena is not quiescent
1776  */
1777
1778 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1779 {
1780         int ret = 0, herc;
1781         u64 tmp64 = ~((u64) val64);
1782         int rev_id = get_xena_rev_id(sp->pdev);
1783
1784         herc = (sp->device_type == XFRAME_II_DEVICE);
1785         if (!
1786             (tmp64 &
1787              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1788               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1789               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1790               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1791               ADAPTER_STATUS_P_PLL_LOCK))) {
1792                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1793         }
1794
1795         return ret;
1796 }
1797
1798 /**
1799  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1800  * @sp: Pointer to device specific structure
1801  * Description :
1802  * New procedure to clear MAC address reading problems on Alpha platforms.
1803  *
1804  */
1805
1806 void fix_mac_address(nic_t * sp)
1807 {
1808         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1809         u64 val64;
1810         int i = 0;
1811
1812         while (fix_mac[i] != END_SIGN) {
1813                 writeq(fix_mac[i++], &bar0->gpio_control);
1814                 udelay(10);
1815                 val64 = readq(&bar0->gpio_control);
1816         }
1817 }
1818
1819 /**
1820  *  start_nic - Turns the device on
1821  *  @nic : device private variable.
1822  *  Description:
1823  *  This function actually turns the device on. Before this function is
1824  *  called, all registers are configured from their reset states
1825  *  and shared memory is allocated but the NIC is still quiescent. On
1826  *  calling this function, the device interrupts are cleared and the NIC is
1827  *  literally switched on by writing into the adapter control register.
1828  *  Return Value:
1829  *  SUCCESS on success and -1 on failure.
1830  */
1831
1832 static int start_nic(struct s2io_nic *nic)
1833 {
1834         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1835         struct net_device *dev = nic->dev;
1836         register u64 val64 = 0;
1837         u16 interruptible;
1838         u16 subid, i;
1839         mac_info_t *mac_control;
1840         struct config_param *config;
1841
1842         mac_control = &nic->mac_control;
1843         config = &nic->config;
1844
1845         /*  PRC Initialization and configuration */
1846         for (i = 0; i < config->rx_ring_num; i++) {
1847                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1848                        &bar0->prc_rxd0_n[i]);
1849
1850                 val64 = readq(&bar0->prc_ctrl_n[i]);
1851                 if (nic->config.bimodal)
1852                         val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1853 #ifndef CONFIG_2BUFF_MODE
1854                 val64 |= PRC_CTRL_RC_ENABLED;
1855 #else
1856                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1857 #endif
1858                 writeq(val64, &bar0->prc_ctrl_n[i]);
1859         }
1860
1861 #ifdef CONFIG_2BUFF_MODE
1862         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1863         val64 = readq(&bar0->rx_pa_cfg);
1864         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1865         writeq(val64, &bar0->rx_pa_cfg);
1866 #endif
1867
1868         /*
1869          * Enabling MC-RLDRAM. After enabling the device, we wait
1870          * for around 100ms, which is approximately the time required
1871          * for the device to be ready for operation.
1872          */
1873         val64 = readq(&bar0->mc_rldram_mrs);
1874         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1875         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1876         val64 = readq(&bar0->mc_rldram_mrs);
1877
1878         msleep(100);    /* Delay by around 100 ms. */
1879
1880         /* Enabling ECC Protection. */
1881         val64 = readq(&bar0->adapter_control);
1882         val64 &= ~ADAPTER_ECC_EN;
1883         writeq(val64, &bar0->adapter_control);
1884
1885         /*
1886          * Clearing any possible Link state change interrupts that
1887          * could have popped up just before Enabling the card.
1888          */
1889         val64 = readq(&bar0->mac_rmac_err_reg);
1890         if (val64)
1891                 writeq(val64, &bar0->mac_rmac_err_reg);
1892
1893         /*
1894          * Verify if the device is ready to be enabled, if so enable
1895          * it.
1896          */
1897         val64 = readq(&bar0->adapter_status);
1898         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1899                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1900                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1901                           (unsigned long long) val64);
1902                 return FAILURE;
1903         }
1904
1905         /*  Enable select interrupts */
1906         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1907         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1908         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1909
1910         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1911
1912         /*
1913          * With some switches, link might be already up at this point.
1914          * Because of this weird behavior, when we enable laser,
1915          * we may not get link. We need to handle this. We cannot
1916          * figure out which switch is misbehaving. So we are forced to
1917          * make a global change.
1918          */
1919
1920         /* Enabling Laser. */
1921         val64 = readq(&bar0->adapter_control);
1922         val64 |= ADAPTER_EOI_TX_ON;
1923         writeq(val64, &bar0->adapter_control);
1924
1925         /* SXE-002: Initialize link and activity LED */
1926         subid = nic->pdev->subsystem_device;
1927         if (((subid & 0xFF) >= 0x07) &&
1928             (nic->device_type == XFRAME_I_DEVICE)) {
1929                 val64 = readq(&bar0->gpio_control);
1930                 val64 |= 0x0000800000000000ULL;
1931                 writeq(val64, &bar0->gpio_control);
1932                 val64 = 0x0411040400000000ULL;
1933                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1934         }
1935
1936         /*
1937          * Don't see link state interrupts on certain switches, so
1938          * directly scheduling a link state task from here.
1939          */
1940         schedule_work(&nic->set_link_task);
1941
1942         return SUCCESS;
1943 }
1944
1945 /**
1946  *  free_tx_buffers - Free all queued Tx buffers
1947  *  @nic : device private variable.
1948  *  Description:
1949  *  Free all queued Tx buffers.
1950  *  Return Value: void
1951 */
1952
1953 static void free_tx_buffers(struct s2io_nic *nic)
1954 {
1955         struct net_device *dev = nic->dev;
1956         struct sk_buff *skb;
1957         TxD_t *txdp;
1958         int i, j;
1959         mac_info_t *mac_control;
1960         struct config_param *config;
1961         int cnt = 0, frg_cnt;
1962
1963         mac_control = &nic->mac_control;
1964         config = &nic->config;
1965
1966         for (i = 0; i < config->tx_fifo_num; i++) {
1967                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1968                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1969                             list_virt_addr;
1970                         skb =
1971                             (struct sk_buff *) ((unsigned long) txdp->
1972                                                 Host_Control);
1973                         if (skb == NULL) {
1974                                 memset(txdp, 0, sizeof(TxD_t) *
1975                                        config->max_txds);
1976                                 continue;
1977                         }
1978                         frg_cnt = skb_shinfo(skb)->nr_frags;
1979                         pci_unmap_single(nic->pdev, (dma_addr_t)
1980                                          txdp->Buffer_Pointer,
1981                                          skb->len - skb->data_len,
1982                                          PCI_DMA_TODEVICE);
1983                         if (frg_cnt) {
1984                                 TxD_t *temp;
1985                                 temp = txdp;
1986                                 txdp++;
1987                                 for (j = 0; j < frg_cnt; j++, txdp++) {
1988                                         skb_frag_t *frag =
1989                                             &skb_shinfo(skb)->frags[j];
1990                                         pci_unmap_page(nic->pdev,
1991                                                        (dma_addr_t)
1992                                                        txdp->
1993                                                        Buffer_Pointer,
1994                                                        frag->size,
1995                                                        PCI_DMA_TODEVICE);
1996                                 }
1997                                 txdp = temp;
1998                         }
1999                         dev_kfree_skb(skb);
2000                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2001                         cnt++;
2002                 }
2003                 DBG_PRINT(INTR_DBG,
2004                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2005                           dev->name, cnt, i);
2006                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2007                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2008         }
2009 }
2010
2011 /**
2012  *   stop_nic -  To stop the nic
2013  *   @nic : device private variable.
2014  *   Description:
2015  *   This function does exactly the opposite of what the start_nic()
2016  *   function does. This function is called to stop the device.
2017  *   Return Value:
2018  *   void.
2019  */
2020
2021 static void stop_nic(struct s2io_nic *nic)
2022 {
2023         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2024         register u64 val64 = 0;
2025         u16 interruptible, i;
2026         mac_info_t *mac_control;
2027         struct config_param *config;
2028
2029         mac_control = &nic->mac_control;
2030         config = &nic->config;
2031
2032         /*  Disable all interrupts */
2033         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
2034         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2035         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2036         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2037
2038         /*  Disable PRCs */
2039         for (i = 0; i < config->rx_ring_num; i++) {
2040                 val64 = readq(&bar0->prc_ctrl_n[i]);
2041                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2042                 writeq(val64, &bar0->prc_ctrl_n[i]);
2043         }
2044 }
2045
2046 /**
2047  *  fill_rx_buffers - Allocates the Rx side skbs
2048  *  @nic:  device private variable
2049  *  @ring_no: ring number
2050  *  Description:
2051  *  The function allocates Rx side skbs and puts the physical
2052  *  address of these buffers into the RxD buffer pointers, so that the NIC
2053  *  can DMA the received frame into these locations.
2054  *  The NIC supports 3 receive modes, viz
2055  *  1. single buffer,
2056  *  2. three buffer and
2057  *  3. Five buffer modes.
2058  *  Each mode defines how many fragments the received frame will be split
2059  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2060  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2061  *  is split into 3 fragments. As of now only single buffer mode is
2062  *  supported.
2063  *   Return Value:
2064  *  SUCCESS on success or an appropriate -ve value on failure.
2065  */
2066
2067 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2068 {
2069         struct net_device *dev = nic->dev;
2070         struct sk_buff *skb;
2071         RxD_t *rxdp;
2072         int off, off1, size, block_no, block_no1;
2073         int offset, offset1;
2074         u32 alloc_tab = 0;
2075         u32 alloc_cnt;
2076         mac_info_t *mac_control;
2077         struct config_param *config;
2078 #ifdef CONFIG_2BUFF_MODE
2079         RxD_t *rxdpnext;
2080         int nextblk;
2081         u64 tmp;
2082         buffAdd_t *ba;
2083         dma_addr_t rxdpphys;
2084 #endif
2085 #ifndef CONFIG_S2IO_NAPI
2086         unsigned long flags;
2087 #endif
2088         RxD_t *first_rxdp = NULL;
2089
2090         mac_control = &nic->mac_control;
2091         config = &nic->config;
2092         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2093             atomic_read(&nic->rx_bufs_left[ring_no]);
2094         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2095             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2096
2097         while (alloc_tab < alloc_cnt) {
2098                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2099                     block_index;
2100                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2101                     block_index;
2102                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2103                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2104 #ifndef CONFIG_2BUFF_MODE
2105                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2106                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2107 #else
2108                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2109                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2110 #endif
2111
2112                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2113                     block_virt_addr + off;
2114                 if ((offset == offset1) && (rxdp->Host_Control)) {
2115                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2116                         DBG_PRINT(INTR_DBG, " info equated\n");
2117                         goto end;
2118                 }
2119 #ifndef CONFIG_2BUFF_MODE
2120                 if (rxdp->Control_1 == END_OF_BLOCK) {
2121                         mac_control->rings[ring_no].rx_curr_put_info.
2122                             block_index++;
2123                         mac_control->rings[ring_no].rx_curr_put_info.
2124                             block_index %= mac_control->rings[ring_no].block_count;
2125                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
2126                                 block_index;
2127                         off++;
2128                         off %= (MAX_RXDS_PER_BLOCK + 1);
2129                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2130                             off;
2131                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2132                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2133                                   dev->name, rxdp);
2134                 }
2135 #ifndef CONFIG_S2IO_NAPI
2136                 spin_lock_irqsave(&nic->put_lock, flags);
2137                 mac_control->rings[ring_no].put_pos =
2138                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2139                 spin_unlock_irqrestore(&nic->put_lock, flags);
2140 #endif
2141 #else
2142                 if (rxdp->Host_Control == END_OF_BLOCK) {
2143                         mac_control->rings[ring_no].rx_curr_put_info.
2144                             block_index++;
2145                         mac_control->rings[ring_no].rx_curr_put_info.block_index
2146                             %= mac_control->rings[ring_no].block_count;
2147                         block_no = mac_control->rings[ring_no].rx_curr_put_info
2148                             .block_index;
2149                         off = 0;
2150                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2151                                   dev->name, block_no,
2152                                   (unsigned long long) rxdp->Control_1);
2153                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2154                             off;
2155                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2156                             block_virt_addr;
2157                 }
2158 #ifndef CONFIG_S2IO_NAPI
2159                 spin_lock_irqsave(&nic->put_lock, flags);
2160                 mac_control->rings[ring_no].put_pos = (block_no *
2161                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
2162                 spin_unlock_irqrestore(&nic->put_lock, flags);
2163 #endif
2164 #endif
2165
2166 #ifndef CONFIG_2BUFF_MODE
2167                 if (rxdp->Control_1 & RXD_OWN_XENA)
2168 #else
2169                 if (rxdp->Control_2 & BIT(0))
2170 #endif
2171                 {
2172                         mac_control->rings[ring_no].rx_curr_put_info.
2173                             offset = off;
2174                         goto end;
2175                 }
2176 #ifdef  CONFIG_2BUFF_MODE
2177                 /*
2178                  * RxDs spanning cache lines will be replenished only
2179                  * if the succeeding RxD is also owned by the Host. It
2180                  * will always be the ((8*i)+3) and ((8*i)+6)
2181                  * descriptors for the 48 byte descriptor. The offending
2182                  * descriptor is of course the 3rd descriptor.
2183                  */
2184                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2185                     block_dma_addr + (off * sizeof(RxD_t));
2186                 if (((u64) (rxdpphys)) % 128 > 80) {
2187                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2188                             block_virt_addr + (off + 1);
2189                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
2190                                 nextblk = (block_no + 1) %
2191                                     (mac_control->rings[ring_no].block_count);
2192                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
2193                                     [nextblk].block_virt_addr;
2194                         }
2195                         if (rxdpnext->Control_2 & BIT(0))
2196                                 goto end;
2197                 }
2198 #endif
2199
2200 #ifndef CONFIG_2BUFF_MODE
2201                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2202 #else
2203                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2204 #endif
2205                 if (!skb) {
2206                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2207                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2208                         if (first_rxdp) {
2209                                 wmb();
2210                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2211                         }
2212                         return -ENOMEM;
2213                 }
2214 #ifndef CONFIG_2BUFF_MODE
2215                 skb_reserve(skb, NET_IP_ALIGN);
2216                 memset(rxdp, 0, sizeof(RxD_t));
2217                 rxdp->Buffer0_ptr = pci_map_single
2218                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2219                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2220                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2221                 rxdp->Host_Control = (unsigned long) (skb);
2222                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2223                         rxdp->Control_1 |= RXD_OWN_XENA;
2224                 off++;
2225                 off %= (MAX_RXDS_PER_BLOCK + 1);
2226                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2227 #else
2228                 ba = &mac_control->rings[ring_no].ba[block_no][off];
2229                 skb_reserve(skb, BUF0_LEN);
2230                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2231                 if (tmp)
2232                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2233
2234                 memset(rxdp, 0, sizeof(RxD_t));
2235                 rxdp->Buffer2_ptr = pci_map_single
2236                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2237                      PCI_DMA_FROMDEVICE);
2238                 rxdp->Buffer0_ptr =
2239                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2240                                    PCI_DMA_FROMDEVICE);
2241                 rxdp->Buffer1_ptr =
2242                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2243                                    PCI_DMA_FROMDEVICE);
2244
2245                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2246                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2247                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2248                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
2249                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2250                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2251                         rxdp->Control_1 |= RXD_OWN_XENA;
2252                 off++;
2253                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2254 #endif
2255                 rxdp->Control_2 |= SET_RXD_MARKER;
2256
2257                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2258                         if (first_rxdp) {
2259                                 wmb();
2260                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2261                         }
2262                         first_rxdp = rxdp;
2263                 }
2264                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2265                 alloc_tab++;
2266         }
2267
2268       end:
2269         /* Transfer ownership of first descriptor to adapter just before
2270          * exiting. Before that, use memory barrier so that ownership
2271          * and other fields are seen by adapter correctly.
2272          */
2273         if (first_rxdp) {
2274                 wmb();
2275                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2276         }
2277
2278         return SUCCESS;
2279 }
2280
2281 /**
2282  *  free_rx_buffers - Frees all Rx buffers
2283  *  @sp: device private variable.
2284  *  Description:
2285  *  This function will free all Rx buffers allocated by host.
2286  *  Return Value:
2287  *  NONE.
2288  */
2289
2290 static void free_rx_buffers(struct s2io_nic *sp)
2291 {
2292         struct net_device *dev = sp->dev;
2293         int i, j, blk = 0, off, buf_cnt = 0;
2294         RxD_t *rxdp;
2295         struct sk_buff *skb;
2296         mac_info_t *mac_control;
2297         struct config_param *config;
2298 #ifdef CONFIG_2BUFF_MODE
2299         buffAdd_t *ba;
2300 #endif
2301
2302         mac_control = &sp->mac_control;
2303         config = &sp->config;
2304
2305         for (i = 0; i < config->rx_ring_num; i++) {
2306                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2307                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2308                         rxdp = mac_control->rings[i].rx_blocks[blk].
2309                                 block_virt_addr + off;
2310
2311 #ifndef CONFIG_2BUFF_MODE
2312                         if (rxdp->Control_1 == END_OF_BLOCK) {
2313                                 rxdp =
2314                                     (RxD_t *) ((unsigned long) rxdp->
2315                                                Control_2);
2316                                 j++;
2317                                 blk++;
2318                         }
2319 #else
2320                         if (rxdp->Host_Control == END_OF_BLOCK) {
2321                                 blk++;
2322                                 continue;
2323                         }
2324 #endif
2325
2326                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2327                                 memset(rxdp, 0, sizeof(RxD_t));
2328                                 continue;
2329                         }
2330
2331                         skb =
2332                             (struct sk_buff *) ((unsigned long) rxdp->
2333                                                 Host_Control);
2334                         if (skb) {
2335 #ifndef CONFIG_2BUFF_MODE
2336                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2337                                                  rxdp->Buffer0_ptr,
2338                                                  dev->mtu +
2339                                                  HEADER_ETHERNET_II_802_3_SIZE
2340                                                  + HEADER_802_2_SIZE +
2341                                                  HEADER_SNAP_SIZE,
2342                                                  PCI_DMA_FROMDEVICE);
2343 #else
2344                                 ba = &mac_control->rings[i].ba[blk][off];
2345                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2346                                                  rxdp->Buffer0_ptr,
2347                                                  BUF0_LEN,
2348                                                  PCI_DMA_FROMDEVICE);
2349                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2350                                                  rxdp->Buffer1_ptr,
2351                                                  BUF1_LEN,
2352                                                  PCI_DMA_FROMDEVICE);
2353                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2354                                                  rxdp->Buffer2_ptr,
2355                                                  dev->mtu + BUF0_LEN + 4,
2356                                                  PCI_DMA_FROMDEVICE);
2357 #endif
2358                                 dev_kfree_skb(skb);
2359                                 atomic_dec(&sp->rx_bufs_left[i]);
2360                                 buf_cnt++;
2361                         }
2362                         memset(rxdp, 0, sizeof(RxD_t));
2363                 }
2364                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2365                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2366                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2367                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2368                 atomic_set(&sp->rx_bufs_left[i], 0);
2369                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2370                           dev->name, buf_cnt, i);
2371         }
2372 }
2373
2374 /**
2375  * s2io_poll - Rx interrupt handler for NAPI support
2376  * @dev : pointer to the device structure.
2377  * @budget : The number of packets that were budgeted to be processed
2378  * during one pass through the "Poll" function.
2379  * Description:
2380  * Comes into the picture only if NAPI support has been incorporated. It does
2381  * the same thing that rx_intr_handler does, but not in an interrupt context,
2382  * and it will process only a given number of packets.
2383  * Return value:
2384  * 0 on success and 1 if there are No Rx packets to be processed.
2385  */
2386
2387 #if defined(CONFIG_S2IO_NAPI)
2388 static int s2io_poll(struct net_device *dev, int *budget)
2389 {
2390         nic_t *nic = dev->priv;
2391         int pkt_cnt = 0, org_pkts_to_process;
2392         mac_info_t *mac_control;
2393         struct config_param *config;
2394         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2395         u64 val64;
2396         int i;
2397
2398         atomic_inc(&nic->isr_cnt);
2399         mac_control = &nic->mac_control;
2400         config = &nic->config;
2401
2402         nic->pkts_to_process = *budget;
2403         if (nic->pkts_to_process > dev->quota)
2404                 nic->pkts_to_process = dev->quota;
2405         org_pkts_to_process = nic->pkts_to_process;
2406
2407         val64 = readq(&bar0->rx_traffic_int);
2408         writeq(val64, &bar0->rx_traffic_int);
2409
2410         for (i = 0; i < config->rx_ring_num; i++) {
2411                 rx_intr_handler(&mac_control->rings[i]);
2412                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2413                 if (!nic->pkts_to_process) {
2414                         /* Quota for the current iteration has been met */
2415                         goto no_rx;
2416                 }
2417         }
2418         if (!pkt_cnt)
2419                 pkt_cnt = 1;
2420
2421         dev->quota -= pkt_cnt;
2422         *budget -= pkt_cnt;
2423         netif_rx_complete(dev);
2424
2425         for (i = 0; i < config->rx_ring_num; i++) {
2426                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2427                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2428                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2429                         break;
2430                 }
2431         }
2432         /* Re enable the Rx interrupts. */
2433         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2434         atomic_dec(&nic->isr_cnt);
2435         return 0;
2436
2437 no_rx:
2438         dev->quota -= pkt_cnt;
2439         *budget -= pkt_cnt;
2440
2441         for (i = 0; i < config->rx_ring_num; i++) {
2442                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2443                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2444                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2445                         break;
2446                 }
2447         }
2448         atomic_dec(&nic->isr_cnt);
2449         return 1;
2450 }
2451 #endif
2452
2453 /**
2454  *  rx_intr_handler - Rx interrupt handler
2455  *  @nic: device private variable.
2456  *  Description:
2457  *  If the interrupt is because of a received frame or if the
2458  *  receive ring contains fresh as yet un-processed frames,this function is
2459  *  called. It picks out the RxD at which place the last Rx processing had
2460  *  stopped and sends the skb to the OSM's Rx handler and then increments
2461  *  the offset.
2462  *  Return Value:
2463  *  NONE.
2464  */
2465 static void rx_intr_handler(ring_info_t *ring_data)
2466 {
2467         nic_t *nic = ring_data->nic;
2468         struct net_device *dev = (struct net_device *) nic->dev;
2469         int get_block, get_offset, put_block, put_offset, ring_bufs;
2470         rx_curr_get_info_t get_info, put_info;
2471         RxD_t *rxdp;
2472         struct sk_buff *skb;
2473 #ifndef CONFIG_S2IO_NAPI
2474         int pkt_cnt = 0;
2475 #endif
2476         spin_lock(&nic->rx_lock);
2477         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2478                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2479                           __FUNCTION__, dev->name);
2480                 spin_unlock(&nic->rx_lock);
                     return;
2481         }
2482
2483         get_info = ring_data->rx_curr_get_info;
2484         get_block = get_info.block_index;
2485         put_info = ring_data->rx_curr_put_info;
2486         put_block = put_info.block_index;
2487         ring_bufs = get_info.ring_len+1;
2488         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2489                     get_info.offset;
2490         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2491                 get_info.offset;
2492 #ifndef CONFIG_S2IO_NAPI
2493         spin_lock(&nic->put_lock);
2494         put_offset = ring_data->put_pos;
2495         spin_unlock(&nic->put_lock);
2496 #else
2497         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2498                 put_info.offset;
2499 #endif
2500         while (RXD_IS_UP2DT(rxdp) &&
2501                (((get_offset + 1) % ring_bufs) != put_offset)) {
2502                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2503                 if (skb == NULL) {
2504                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2505                                   dev->name);
2506                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2507                         spin_unlock(&nic->rx_lock);
2508                         return;
2509                 }
2510 #ifndef CONFIG_2BUFF_MODE
2511                 pci_unmap_single(nic->pdev, (dma_addr_t)
2512                                  rxdp->Buffer0_ptr,
2513                                  dev->mtu +
2514                                  HEADER_ETHERNET_II_802_3_SIZE +
2515                                  HEADER_802_2_SIZE +
2516                                  HEADER_SNAP_SIZE,
2517                                  PCI_DMA_FROMDEVICE);
2518 #else
2519                 pci_unmap_single(nic->pdev, (dma_addr_t)
2520                                  rxdp->Buffer0_ptr,
2521                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2522                 pci_unmap_single(nic->pdev, (dma_addr_t)
2523                                  rxdp->Buffer1_ptr,
2524                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2525                 pci_unmap_single(nic->pdev, (dma_addr_t)
2526                                  rxdp->Buffer2_ptr,
2527                                  dev->mtu + BUF0_LEN + 4,
2528                                  PCI_DMA_FROMDEVICE);
2529 #endif
2530                 rx_osm_handler(ring_data, rxdp);
2531                 get_info.offset++;
2532                 ring_data->rx_curr_get_info.offset =
2533                     get_info.offset;
2534                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2535                     get_info.offset;
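                /*
                 * If the last RxD of this block has been consumed, wrap the
                 * offset to 0 and move on to the next block in the ring.
                 */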
2536                 if (get_info.offset &&
2537                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2538                         get_info.offset = 0;
2539                         ring_data->rx_curr_get_info.offset
2540                             = get_info.offset;
2541                         get_block++;
2542                         get_block %= ring_data->block_count;
2543                         ring_data->rx_curr_get_info.block_index
2544                             = get_block;
2545                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2546                 }
2547
2548                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2549                             get_info.offset;
2550 #ifdef CONFIG_S2IO_NAPI
2551                 nic->pkts_to_process -= 1;
2552                 if (!nic->pkts_to_process)
2553                         break;
2554 #else
2555                 pkt_cnt++;
2556                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2557                         break;
2558 #endif
2559         }
2560         spin_unlock(&nic->rx_lock);
2561 }
2562
2563 /**
2564  *  tx_intr_handler - Transmit interrupt handler
2565  *  @nic : device private variable
2566  *  Description:
2567  *  If an interrupt was raised to indicate DMA complete of the
2568  *  Tx packet, this function is called. It identifies the last TxD
2569  *  whose buffer was freed and frees all skbs whose data have already
2570  *  DMA'ed into the NICs internal memory.
2571  *  Return Value:
2572  *  NONE
2573  */
2574
2575 static void tx_intr_handler(fifo_info_t *fifo_data)
2576 {
2577         nic_t *nic = fifo_data->nic;
2578         struct net_device *dev = (struct net_device *) nic->dev;
2579         tx_curr_get_info_t get_info, put_info;
2580         struct sk_buff *skb;
2581         TxD_t *txdlp;
2582         u16 j, frg_cnt;
2583
2584         get_info = fifo_data->tx_curr_get_info;
2585         put_info = fifo_data->tx_curr_put_info;
2586         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2587             list_virt_addr;
2588         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2589                (get_info.offset != put_info.offset) &&
2590                (txdlp->Host_Control)) {
2591                 /* Check for TxD errors */
2592                 if (txdlp->Control_1 & TXD_T_CODE) {
2593                         unsigned long long err;
2594                         err = txdlp->Control_1 & TXD_T_CODE;
2595                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2596                                   err);
2597                 }
2598
2599                 skb = (struct sk_buff *) ((unsigned long)
2600                                 txdlp->Host_Control);
2601                 if (skb == NULL) {
2602                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2603                         __FUNCTION__);
2604                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2605                         return;
2606                 }
2607
2608                 frg_cnt = skb_shinfo(skb)->nr_frags;
2609                 nic->tx_pkt_count++;
2610
2611                 pci_unmap_single(nic->pdev, (dma_addr_t)
2612                                  txdlp->Buffer_Pointer,
2613                                  skb->len - skb->data_len,
2614                                  PCI_DMA_TODEVICE);
2615                 if (frg_cnt) {
2616                         TxD_t *temp;
2617                         temp = txdlp;
2618                         txdlp++;
2619                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2620                                 skb_frag_t *frag =
2621                                     &skb_shinfo(skb)->frags[j];
2622                                 if (!txdlp->Buffer_Pointer)
2623                                         break;
2624                                 pci_unmap_page(nic->pdev,
2625                                                (dma_addr_t)
2626                                                txdlp->
2627                                                Buffer_Pointer,
2628                                                frag->size,
2629                                                PCI_DMA_TODEVICE);
2630                         }
2631                         txdlp = temp;
2632                 }
2633                 memset(txdlp, 0,
2634                        (sizeof(TxD_t) * fifo_data->max_txds));
2635
2636                 /* Updating the statistics block */
2637                 nic->stats.tx_bytes += skb->len;
2638                 dev_kfree_skb_irq(skb);
2639
2640                 get_info.offset++;
2641                 get_info.offset %= get_info.fifo_len + 1;
2642                 txdlp = (TxD_t *) fifo_data->list_info
2643                     [get_info.offset].list_virt_addr;
2644                 fifo_data->tx_curr_get_info.offset =
2645                     get_info.offset;
2646         }
2647
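        /*
         * TxDs were reclaimed above, so wake the transmit queue if it was
         * stopped earlier for lack of free descriptors.
         */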
2648         spin_lock(&nic->tx_lock);
2649         if (netif_queue_stopped(dev))
2650                 netif_wake_queue(dev);
2651         spin_unlock(&nic->tx_lock);
2652 }
2653
2654 /**
2655  *  alarm_intr_handler - Alarm Interrupt handler
2656  *  @nic: device private variable
2657  *  Description: If the interrupt was neither because of an Rx packet nor a
2658  *  Tx completion, this function is called. If the interrupt was to indicate
2659  *  a loss of link, the OSM link status handler is invoked. For any other
2660  *  alarm interrupt, the block that raised the interrupt is displayed
2661  *  and a H/W reset is issued.
2662  *  Return Value:
2663  *  NONE
2664 */
2665
2666 static void alarm_intr_handler(struct s2io_nic *nic)
2667 {
2668         struct net_device *dev = (struct net_device *) nic->dev;
2669         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2670         register u64 val64 = 0, err_reg = 0;
2671
2672         /* Handling link status change error Intr */
2673         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2674                 err_reg = readq(&bar0->mac_rmac_err_reg);
2675                 writeq(err_reg, &bar0->mac_rmac_err_reg);
2676                 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2677                         schedule_work(&nic->set_link_task);
2678                 }
2679         }
2680
2681         /* Handling Ecc errors */
2682         val64 = readq(&bar0->mc_err_reg);
2683         writeq(val64, &bar0->mc_err_reg);
2684         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2685                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2686                         nic->mac_control.stats_info->sw_stat.
2687                                 double_ecc_errs++;
2688                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2689                                   dev->name);
2690                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2691                         netif_stop_queue(dev);
2692                         schedule_work(&nic->rst_timer_task);
2693                 } else {
2694                         nic->mac_control.stats_info->sw_stat.
2695                                 single_ecc_errs++;
2696                 }
2697         }
2698
2699         /* In case of a serious error, the device will be Reset. */
2700         val64 = readq(&bar0->serr_source);
2701         if (val64 & SERR_SOURCE_ANY) {
2702                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2703                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2704                 netif_stop_queue(dev);
2705                 schedule_work(&nic->rst_timer_task);
2706         }
2707
2708         /*
2709          * Also, as mentioned in the latest Errata sheets, if a PCC_FB_ECC
2710          * error occurs, the adapter will be recycled by disabling the
2711          * adapter enable bit and enabling it again after the device
2712          * becomes Quiescent.
2713          */
2714         val64 = readq(&bar0->pcc_err_reg);
2715         writeq(val64, &bar0->pcc_err_reg);
2716         if (val64 & PCC_FB_ECC_DB_ERR) {
2717                 u64 ac = readq(&bar0->adapter_control);
2718                 ac &= ~(ADAPTER_CNTL_EN);
2719                 writeq(ac, &bar0->adapter_control);
2720                 ac = readq(&bar0->adapter_control);
2721                 schedule_work(&nic->set_link_task);
2722         }
2723
2724         /* Other type of interrupts are not being handled now,  TODO */
2725 }
2726
2727 /**
2728  *  wait_for_cmd_complete - waits for a command to complete.
2729  *  @sp : private member of the device structure, which is a pointer to the
2730  *  s2io_nic structure.
2731  *  Description: Function that waits for a command written to the RMAC
2732  *  ADDR/DATA registers to complete and returns either success or
2733  *  error depending on whether the command completed or not.
2734  *  Return value:
2735  *   SUCCESS on success and FAILURE on failure.
2736  */
2737
2738 int wait_for_cmd_complete(nic_t * sp)
2739 {
2740         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2741         int ret = FAILURE, cnt = 0;
2742         u64 val64;
2743
2744         while (TRUE) {
2745                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2746                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2747                         ret = SUCCESS;
2748                         break;
2749                 }
2750                 msleep(50);
2751                 if (cnt++ > 10)
2752                         break;
2753         }
2754
2755         return ret;
2756 }
2757
2758 /**
2759  *  s2io_reset - Resets the card.
2760  *  @sp : private member of the device structure.
2761  *  Description: Function to Reset the card. This function then also
2762  *  restores the previously saved PCI configuration space registers as
2763  *  the card reset also resets the configuration space.
2764  *  Return value:
2765  *  void.
2766  */
2767
2768 void s2io_reset(nic_t * sp)
2769 {
2770         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2771         u64 val64;
2772         u16 subid, pci_cmd;
2773
2774         /* Back up the PCI-X CMD reg; we don't want to lose the MMRBC, OST settings */
2775         if (sp->device_type == XFRAME_I_DEVICE)
2776                 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2777
2778         val64 = SW_RESET_ALL;
2779         writeq(val64, &bar0->sw_reset);
2780
2781         /*
2782          * At this stage, if the PCI write is indeed completed, the
2783          * card is reset and so is the PCI Config space of the device.
2784          * So a read cannot be issued at this stage on any of the
2785          * registers to ensure the write into "sw_reset" register
2786          * has gone through.
2787          * Question: Is there any system call that will explicitly force
2788          * all the write commands still pending on the bus to be pushed
2789          * through?
2790          * As of now I'm just giving a 250ms delay and hoping that the
2791          * PCI write to sw_reset register is done by this time.
2792          */
2793         msleep(250);
2794
2795         if (!(sp->device_type & XFRAME_II_DEVICE)) {
2796                 /* Restore the PCI state saved during initialization. */
2797                 pci_restore_state(sp->pdev);
2798                 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2799                                      pci_cmd);
2800         } else {
2801                 pci_set_master(sp->pdev);
2802         }
2803         s2io_init_pci(sp);
2804
2805         msleep(250);
2806
2807         /* Set swapper to enable I/O register access */
2808         s2io_set_swapper(sp);
2809
2810         /* Clear certain PCI/PCI-X fields after reset */
2811         if (sp->device_type == XFRAME_II_DEVICE) {
2812                 /* Clear parity err detect bit */
2813                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2814
2815                 /* Clearing PCIX Ecc status register */
2816                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2817
2818                 /* Clearing PCI_STATUS error reflected here */
2819                 writeq(BIT(62), &bar0->txpic_int_reg);
2820         }
2821
2822         /* Reset device statistics maintained by OS */
2823         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2824
2825         /* SXE-002: Configure link and activity LED to turn it off */
2826         subid = sp->pdev->subsystem_device;
2827         if (((subid & 0xFF) >= 0x07) &&
2828             (sp->device_type == XFRAME_I_DEVICE)) {
2829                 val64 = readq(&bar0->gpio_control);
2830                 val64 |= 0x0000800000000000ULL;
2831                 writeq(val64, &bar0->gpio_control);
2832                 val64 = 0x0411040400000000ULL;
2833                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2834         }
2835
2836         /*
2837          * Clear spurious ECC interrupts that would have occurred on
2838          * XFRAME II cards after reset.
2839          */
2840         if (sp->device_type == XFRAME_II_DEVICE) {
2841                 val64 = readq(&bar0->pcc_err_reg);
2842                 writeq(val64, &bar0->pcc_err_reg);
2843         }
2844
2845         sp->device_enabled_once = FALSE;
2846 }
2847
2848 /**
2849  *  s2io_set_swapper - to set the swapper control on the card
2850  *  @sp : private member of the device structure,
2851  *  pointer to the s2io_nic structure.
2852  *  Description: Function to set the swapper control on the card
2853  *  correctly depending on the 'endianness' of the system.
2854  *  Return value:
2855  *  SUCCESS on success and FAILURE on failure.
2856  */
2857
2858 int s2io_set_swapper(nic_t * sp)
2859 {
2860         struct net_device *dev = sp->dev;
2861         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2862         u64 val64, valt, valr;
2863
2864         /*
2865          * Set proper endian settings and verify the same by reading
2866          * the PIF Feed-back register.
2867          */
2868
2869         val64 = readq(&bar0->pif_rd_swapper_fb);
2870         if (val64 != 0x0123456789ABCDEFULL) {
2871                 int i = 0;
2872                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2873                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2874                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2875                                 0};                     /* FE=0, SE=0 */
2876
2877                 while(i<4) {
2878                         writeq(value[i], &bar0->swapper_ctrl);
2879                         val64 = readq(&bar0->pif_rd_swapper_fb);
2880                         if (val64 == 0x0123456789ABCDEFULL)
2881                                 break;
2882                         i++;
2883                 }
2884                 if (i == 4) {
2885                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2886                                 dev->name);
2887                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2888                                 (unsigned long long) val64);
2889                         return FAILURE;
2890                 }
2891                 valr = value[i];
2892         } else {
2893                 valr = readq(&bar0->swapper_ctrl);
2894         }
2895
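        /*
         * The read path looks fine; now verify the write path by writing a
         * known pattern to the XMSI address register and reading it back.
         */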
2896         valt = 0x0123456789ABCDEFULL;
2897         writeq(valt, &bar0->xmsi_address);
2898         val64 = readq(&bar0->xmsi_address);
2899
2900         if(val64 != valt) {
2901                 int i = 0;
2902                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2903                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2904                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2905                                 0};                     /* FE=0, SE=0 */
2906
2907                 while(i<4) {
2908                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2909                         writeq(valt, &bar0->xmsi_address);
2910                         val64 = readq(&bar0->xmsi_address);
2911                         if(val64 == valt)
2912                                 break;
2913                         i++;
2914                 }
2915                 if(i == 4) {
2916                         unsigned long long x = val64;
2917                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2918                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2919                         return FAILURE;
2920                 }
2921         }
2922         val64 = readq(&bar0->swapper_ctrl);
2923         val64 &= 0xFFFF000000000000ULL;
2924
2925 #ifdef  __BIG_ENDIAN
2926         /*
2927          * The device is set to a big endian format by default, so a
2928          * big endian driver need not set anything.
2929          */
2930         val64 |= (SWAPPER_CTRL_TXP_FE |
2931                  SWAPPER_CTRL_TXP_SE |
2932                  SWAPPER_CTRL_TXD_R_FE |
2933                  SWAPPER_CTRL_TXD_W_FE |
2934                  SWAPPER_CTRL_TXF_R_FE |
2935                  SWAPPER_CTRL_RXD_R_FE |
2936                  SWAPPER_CTRL_RXD_W_FE |
2937                  SWAPPER_CTRL_RXF_W_FE |
2938                  SWAPPER_CTRL_XMSI_FE |
2939                  SWAPPER_CTRL_XMSI_SE |
2940                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2941         writeq(val64, &bar0->swapper_ctrl);
2942 #else
2943         /*
2944          * Initially we enable all bits to make it accessible by the
2945          * driver, then we selectively enable only those bits that
2946          * we want to set.
2947          */
2948         val64 |= (SWAPPER_CTRL_TXP_FE |
2949                  SWAPPER_CTRL_TXP_SE |
2950                  SWAPPER_CTRL_TXD_R_FE |
2951                  SWAPPER_CTRL_TXD_R_SE |
2952                  SWAPPER_CTRL_TXD_W_FE |
2953                  SWAPPER_CTRL_TXD_W_SE |
2954                  SWAPPER_CTRL_TXF_R_FE |
2955                  SWAPPER_CTRL_RXD_R_FE |
2956                  SWAPPER_CTRL_RXD_R_SE |
2957                  SWAPPER_CTRL_RXD_W_FE |
2958                  SWAPPER_CTRL_RXD_W_SE |
2959                  SWAPPER_CTRL_RXF_W_FE |
2960                  SWAPPER_CTRL_XMSI_FE |
2961                  SWAPPER_CTRL_XMSI_SE |
2962                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2963         writeq(val64, &bar0->swapper_ctrl);
2964 #endif
2965         val64 = readq(&bar0->swapper_ctrl);
2966
2967         /*
2968          * Verifying if endian settings are accurate by reading a
2969          * feedback register.
2970          */
2971         val64 = readq(&bar0->pif_rd_swapper_fb);
2972         if (val64 != 0x0123456789ABCDEFULL) {
2973                 /* Endian settings are incorrect, calls for another look. */
2974                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2975                           dev->name);
2976                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2977                           (unsigned long long) val64);
2978                 return FAILURE;
2979         }
2980
2981         return SUCCESS;
2982 }
2983
2984 /* ********************************************************* *
2985  * Functions defined below concern the OS part of the driver *
2986  * ********************************************************* */
2987
2988 /**
2989  *  s2io_open - open entry point of the driver
2990  *  @dev : pointer to the device structure.
2991  *  Description:
2992  *  This function is the open entry point of the driver. It mainly calls a
2993  *  function to allocate Rx buffers and inserts them into the buffer
2994  *  descriptors and then enables the Rx part of the NIC.
2995  *  Return value:
2996  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2997  *   file on failure.
2998  */
2999
3000 int s2io_open(struct net_device *dev)
3001 {
3002         nic_t *sp = dev->priv;
3003         int err = 0;
3004
3005         /*
3006          * Make sure you have link off by default every time
3007          * Nic is initialized
3008          */
3009         netif_carrier_off(dev);
3010         sp->last_link_state = 0;
3011
3012         /* Initialize H/W and enable interrupts */
3013         if (s2io_card_up(sp)) {
3014                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3015                           dev->name);
3016                 err = -ENODEV;
3017                 goto hw_init_failed;
3018         }
3019
3020         /* After proper initialization of H/W, register ISR */
3021         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3022                           sp->name, dev);
3023         if (err) {
3024                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3025                           dev->name);
3026                 goto isr_registration_failed;
3027         }
3028
3029         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3030                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3031                 err = -ENODEV;
3032                 goto setting_mac_address_failed;
3033         }
3034
3035         netif_start_queue(dev);
3036         return 0;
3037
3038 setting_mac_address_failed:
3039         free_irq(sp->pdev->irq, dev);
3040 isr_registration_failed:
3041         del_timer_sync(&sp->alarm_timer);
3042         s2io_reset(sp);
3043 hw_init_failed:
3044         return err;
3045 }
3046
3047 /**
3048  *  s2io_close -close entry point of the driver
3049  *  @dev : device pointer.
3050  *  Description:
3051  *  This is the stop entry point of the driver. It needs to undo exactly
3052  *  whatever was done by the open entry point, thus it's usually referred to
3053  *  as the close function. Among other things this function mainly stops the
3054  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3055  *  Return value:
3056  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3057  *  file on failure.
3058  */
3059
3060 int s2io_close(struct net_device *dev)
3061 {
3062         nic_t *sp = dev->priv;
3063         flush_scheduled_work();
3064         netif_stop_queue(dev);
3065         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3066         s2io_card_down(sp);
3067
3068         free_irq(sp->pdev->irq, dev);
3069         sp->device_close_flag = TRUE;   /* Device is shut down. */
3070         return 0;
3071 }
3072
3073 /**
3074  *  s2io_xmit - Tx entry point of the driver
3075  *  @skb : the socket buffer containing the Tx data.
3076  *  @dev : device pointer.
3077  *  Description :
3078  *  This function is the Tx entry point of the driver. S2IO NIC supports
3079  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3080  *  NOTE: when the device cannot queue the packet, just the trans_start
3081  *  variable will not be updated.
3082  *  Return value:
3083  *  0 on success & 1 on failure.
3084  */
3085
3086 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3087 {
3088         nic_t *sp = dev->priv;
3089         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3090         register u64 val64;
3091         TxD_t *txdp;
3092         TxFIFO_element_t __iomem *tx_fifo;
3093         unsigned long flags;
3094 #ifdef NETIF_F_TSO
3095         int mss;
3096 #endif
3097         u16 vlan_tag = 0;
3098         int vlan_priority = 0;
3099         mac_info_t *mac_control;
3100         struct config_param *config;
3101
3102         mac_control = &sp->mac_control;
3103         config = &sp->config;
3104
3105         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3106         spin_lock_irqsave(&sp->tx_lock, flags);
3107         if (atomic_read(&sp->card_state) == CARD_DOWN) {
3108                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3109                           dev->name);
3110                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3111                 dev_kfree_skb(skb);
3112                 return 0;
3113         }
3114
3115         queue = 0;
3116
3117         /* Get Fifo number to Transmit based on vlan priority */
3118         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3119                 vlan_tag = vlan_tx_tag_get(skb);
3120                 vlan_priority = vlan_tag >> 13;
3121                 queue = config->fifo_mapping[vlan_priority];
3122         }
3123
3124         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3125         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3126         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3127                 list_virt_addr;
3128
3129         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3130         /* Avoid "put" pointer going beyond "get" pointer */
3131         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3132                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3133                 netif_stop_queue(dev);
3134                 dev_kfree_skb(skb);
3135                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3136                 return 0;
3137         }
3138
3139         /* A buffer with no data will be dropped */
3140         if (!skb->len) {
3141                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3142                 dev_kfree_skb(skb);
3143                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3144                 return 0;
3145         }
3146
3147 #ifdef NETIF_F_TSO
3148         mss = skb_shinfo(skb)->tso_size;
3149         if (mss) {
3150                 txdp->Control_1 |= TXD_TCP_LSO_EN;
3151                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3152         }
3153 #endif
3154
3155         frg_cnt = skb_shinfo(skb)->nr_frags;
3156         frg_len = skb->len - skb->data_len;
3157
3158         txdp->Buffer_Pointer = pci_map_single
3159             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3160         txdp->Host_Control = (unsigned long) skb;
3161         if (skb->ip_summed == CHECKSUM_HW) {
3162                 txdp->Control_2 |=
3163                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3164                      TXD_TX_CKO_UDP_EN);
3165         }
3166
3167         txdp->Control_2 |= config->tx_intr_type;
3168
3169         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3170                 txdp->Control_2 |= TXD_VLAN_ENABLE;
3171                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3172         }
3173
3174         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3175                             TXD_GATHER_CODE_FIRST);
3176         txdp->Control_1 |= TXD_LIST_OWN_XENA;
3177
3178         /* For fragmented SKB. */
3179         for (i = 0; i < frg_cnt; i++) {
3180                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3181                 /* A '0' length fragment will be ignored */
3182                 if (!frag->size)
3183                         continue;
3184                 txdp++;
3185                 txdp->Buffer_Pointer = (u64) pci_map_page
3186                     (sp->pdev, frag->page, frag->page_offset,
3187                      frag->size, PCI_DMA_TODEVICE);
3188                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3189         }
3190         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3191
3192         tx_fifo = mac_control->tx_FIFO_start[queue];
3193         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3194         writeq(val64, &tx_fifo->TxDL_Pointer);
3195
3196         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3197                  TX_FIFO_LAST_LIST);
3198
3199 #ifdef NETIF_F_TSO
3200         if (mss)
3201                 val64 |= TX_FIFO_SPECIAL_FUNC;
3202 #endif
3203         writeq(val64, &tx_fifo->List_Control);
3204
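        /*
         * mmiowb() keeps the MMIO doorbell writes above ordered with respect
         * to the spin_unlock that follows, which matters on some platforms.
         */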
3205         mmiowb();
3206
3207         put_off++;
3208         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3209         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3210
3211         /* Avoid "put" pointer going beyond "get" pointer */
3212         if (((put_off + 1) % queue_len) == get_off) {
3213                 DBG_PRINT(TX_DBG,
3214                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3215                           put_off, get_off);
3216                 netif_stop_queue(dev);
3217         }
3218
3219         dev->trans_start = jiffies;
3220         spin_unlock_irqrestore(&sp->tx_lock, flags);
3221
3222         return 0;
3223 }
3224
3225 static void
3226 s2io_alarm_handle(unsigned long data)
3227 {
3228         nic_t *sp = (nic_t *)data;
3229
3230         alarm_intr_handler(sp);
3231         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3232 }
3233
3234 static void s2io_txpic_intr_handle(nic_t *sp)
3235 {
3236         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3237         u64 val64;
3238
3239         val64 = readq(&bar0->pic_int_status);
3240         if (val64 & PIC_INT_GPIO) {
3241                 val64 = readq(&bar0->gpio_int_reg);
3242                 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3243                     (val64 & GPIO_INT_REG_LINK_UP)) {
3244                         val64 |=  GPIO_INT_REG_LINK_DOWN;
3245                         val64 |= GPIO_INT_REG_LINK_UP;
3246                         writeq(val64, &bar0->gpio_int_reg);
3247                         goto masking;
3248                 }
3249
3250                 if (((sp->last_link_state == LINK_UP) &&
3251                         (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3252                 ((sp->last_link_state == LINK_DOWN) &&
3253                 (val64 & GPIO_INT_REG_LINK_UP))) {
3254                         val64 = readq(&bar0->gpio_int_mask);
3255                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3256                         val64 |= GPIO_INT_MASK_LINK_UP;
3257                         writeq(val64, &bar0->gpio_int_mask);
3258                         s2io_set_link((unsigned long)sp);
3259                 }
3260 masking:
3261                 if (sp->last_link_state == LINK_UP) {
3262                         /*enable down interrupt */
3263                         val64 = readq(&bar0->gpio_int_mask);
3264                         /* unmasks link down intr */
3265                         val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
3266                         /* masks link up intr */
3267                         val64 |= GPIO_INT_MASK_LINK_UP;
3268                         writeq(val64, &bar0->gpio_int_mask);
3269                 } else {
3270                         /*enable UP Interrupt */
3271                         val64 = readq(&bar0->gpio_int_mask);
3272                         /* unmasks link up interrupt */
3273                         val64 &= ~GPIO_INT_MASK_LINK_UP;
3274                         /* masks link down interrupt */
3275                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3276                         writeq(val64, &bar0->gpio_int_mask);
3277                 }
3278         }
3279 }
3280
3281 /**
3282  *  s2io_isr - ISR handler of the device .
3283  *  @irq: the irq of the device.
3284  *  @dev_id: a void pointer to the dev structure of the NIC.
3285  *  @pt_regs: pointer to the registers pushed on the stack.
3286  *  Description:  This function is the ISR handler of the device. It
3287  *  identifies the reason for the interrupt and calls the relevant
3288  *  service routines. As a contongency measure, this ISR allocates the
3289  *  recv buffers, if their numbers are below the panic value which is
3290  *  presently set to 25% of the original number of rcv buffers allocated.
3291  *  Return value:
3292  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
3293  *   IRQ_NONE: will be returned if interrupt is not from our device
3294  */
3295 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3296 {
3297         struct net_device *dev = (struct net_device *) dev_id;
3298         nic_t *sp = dev->priv;
3299         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3300         int i;
3301         u64 reason = 0, val64;
3302         mac_info_t *mac_control;
3303         struct config_param *config;
3304
3305         atomic_inc(&sp->isr_cnt);
3306         mac_control = &sp->mac_control;
3307         config = &sp->config;
3308
3309         /*
3310          * Identify the cause for interrupt and call the appropriate
3311          * interrupt handler. Causes for the interrupt could be;
3312          * 1. Rx of packet.
3313          * 2. Tx complete.
3314          * 3. Link down.
3315          * 4. Error in any functional blocks of the NIC.
3316          */
3317         reason = readq(&bar0->general_int_status);
3318
3319         if (!reason) {
3320                 /* The interrupt was not raised by Xena. */
3321                 atomic_dec(&sp->isr_cnt);
3322                 return IRQ_NONE;
3323         }
3324
3325 #ifdef CONFIG_S2IO_NAPI
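        /*
         * Under NAPI, mask further Rx traffic interrupts and defer the Rx
         * processing to the poll routine.
         */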
3326         if (reason & GEN_INTR_RXTRAFFIC) {
3327                 if (netif_rx_schedule_prep(dev)) {
3328                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3329                                               DISABLE_INTRS);
3330                         __netif_rx_schedule(dev);
3331                 }
3332         }
3333 #else
3334         /* If Intr is because of Rx Traffic */
3335         if (reason & GEN_INTR_RXTRAFFIC) {
3336                 /*
3337                  * rx_traffic_int reg is an R1 register, writing all 1's
3338                  * will ensure that the actual interrupt causing bit gets
3339                  * cleared and hence a read can be avoided.
3340                  */
3341                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3342                 writeq(val64, &bar0->rx_traffic_int);
3343                 for (i = 0; i < config->rx_ring_num; i++) {
3344                         rx_intr_handler(&mac_control->rings[i]);
3345                 }
3346         }
3347 #endif
3348
3349         /* If Intr is because of Tx Traffic */
3350         if (reason & GEN_INTR_TXTRAFFIC) {
3351                 /*
3352                  * tx_traffic_int reg is an R1 register, writing all 1's
3353                  * will ensure that the actual interrupt causing bit gets
3354                  * cleared and hence a read can be avoided.
3355                  */
3356                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3357                 writeq(val64, &bar0->tx_traffic_int);
3358
3359                 for (i = 0; i < config->tx_fifo_num; i++)
3360                         tx_intr_handler(&mac_control->fifos[i]);
3361         }
3362
3363         if (reason & GEN_INTR_TXPIC)
3364                 s2io_txpic_intr_handle(sp);
3365         /*
3366          * If the Rx buffer count is below the panic threshold then
3367          * reallocate the buffers from the interrupt handler itself,
3368          * else schedule a tasklet to reallocate the buffers.
3369          */
3370 #ifndef CONFIG_S2IO_NAPI
3371         for (i = 0; i < config->rx_ring_num; i++) {
3372                 int ret;
3373                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3374                 int level = rx_buffer_level(sp, rxb_size, i);
3375
3376                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3377                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3378                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3379                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3380                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3381                                           dev->name);
3382                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3383                                 clear_bit(0, (&sp->tasklet_status));
3384                                 atomic_dec(&sp->isr_cnt);
3385                                 return IRQ_HANDLED;
3386                         }
3387                         clear_bit(0, (&sp->tasklet_status));
3388                 } else if (level == LOW) {
3389                         tasklet_schedule(&sp->task);
3390                 }
3391         }
3392 #endif
3393
3394         atomic_dec(&sp->isr_cnt);
3395         return IRQ_HANDLED;
3396 }
3397
3398 /**
3399  * s2io_updt_stats - triggers an immediate hardware statistics update.
3400  */
3401 static void s2io_updt_stats(nic_t *sp)
3402 {
3403         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3404         u64 val64;
3405         int cnt = 0;
3406
3407         if (atomic_read(&sp->card_state) == CARD_UP) {
3408                 /* Apprx 30us on a 133 MHz bus */
3409                 val64 = SET_UPDT_CLICKS(10) |
3410                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3411                 writeq(val64, &bar0->stat_cfg);
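                /* Wait for the NIC to clear the update strobe (up to ~500us). */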
3412                 do {
3413                         udelay(100);
3414                         val64 = readq(&bar0->stat_cfg);
3415                         if (!(val64 & BIT(0)))
3416                                 break;
3417                         cnt++;
3418                         if (cnt == 5)
3419                                 break; /* Updt failed */
3420                 } while(1);
3421         }
3422 }
3423
3424 /**
3425  *  s2io_get_stats - Updates the device statistics structure.
3426  *  @dev : pointer to the device structure.
3427  *  Description:
3428  *  This function updates the device statistics structure in the s2io_nic
3429  *  structure and returns a pointer to the same.
3430  *  Return value:
3431  *  pointer to the updated net_device_stats structure.
3432  */
3433
3434 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3435 {
3436         nic_t *sp = dev->priv;
3437         mac_info_t *mac_control;
3438         struct config_param *config;
3439
3440
3441         mac_control = &sp->mac_control;
3442         config = &sp->config;
3443
3444         /* Configure Stats for immediate updt */
3445         s2io_updt_stats(sp);
3446
3447         sp->stats.tx_packets =
3448                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3449         sp->stats.tx_errors =
3450                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3451         sp->stats.rx_errors =
3452                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3453         sp->stats.multicast =
3454                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3455         sp->stats.rx_length_errors =
3456                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3457
3458         return (&sp->stats);
3459 }
3460
3461 /**
3462  *  s2io_set_multicast - entry point for multicast address enable/disable.
3463  *  @dev : pointer to the device structure
3464  *  Description:
3465  *  This function is a driver entry point which gets called by the kernel
3466  *  whenever multicast addresses must be enabled/disabled. This also gets
3467  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
3468  *  determine, if multicast address must be enabled or if promiscuous mode
3469  *  is to be disabled etc.
3470  *  Return value:
3471  *  void.
3472  */
3473
3474 static void s2io_set_multicast(struct net_device *dev)
3475 {
3476         int i, j, prev_cnt;
3477         struct dev_mc_list *mclist;
3478         nic_t *sp = dev->priv;
3479         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3480         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3481             0xfeffffffffffULL;
3482         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3483         void __iomem *add;
3484
3485         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3486                 /*  Enable all Multicast addresses */
3487                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3488                        &bar0->rmac_addr_data0_mem);
3489                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3490                        &bar0->rmac_addr_data1_mem);
3491                 val64 = RMAC_ADDR_CMD_MEM_WE |
3492                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3493                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3494                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3495                 /* Wait till command completes */
3496                 wait_for_cmd_complete(sp);
3497
3498                 sp->m_cast_flg = 1;
3499                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3500         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3501                 /*  Disable all Multicast addresses */
3502                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3503                        &bar0->rmac_addr_data0_mem);
3504                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3505                        &bar0->rmac_addr_data1_mem);
3506                 val64 = RMAC_ADDR_CMD_MEM_WE |
3507                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3508                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3509                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3510                 /* Wait till command completes */
3511                 wait_for_cmd_complete(sp);
3512
3513                 sp->m_cast_flg = 0;
3514                 sp->all_multi_pos = 0;
3515         }
3516
3517         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3518                 /*  Put the NIC into promiscuous mode */
3519                 add = &bar0->mac_cfg;
3520                 val64 = readq(&bar0->mac_cfg);
3521                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3522
3523                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3524                 writel((u32) val64, add);
3525                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3526                 writel((u32) (val64 >> 32), (add + 4));
3527
3528                 val64 = readq(&bar0->mac_cfg);
3529                 sp->promisc_flg = 1;
3530                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3531                           dev->name);
3532         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3533                 /*  Remove the NIC from promiscuous mode */
3534                 add = &bar0->mac_cfg;
3535                 val64 = readq(&bar0->mac_cfg);
3536                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3537
3538                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3539                 writel((u32) val64, add);
3540                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3541                 writel((u32) (val64 >> 32), (add + 4));
3542
3543                 val64 = readq(&bar0->mac_cfg);
3544                 sp->promisc_flg = 0;
3545                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3546                           dev->name);
3547         }
3548
3549         /*  Update individual M_CAST address list */
3550         if ((!sp->m_cast_flg) && dev->mc_count) {
3551                 if (dev->mc_count >
3552                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3553                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3554                                   dev->name);
3555                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3556                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3557                         return;
3558                 }
3559
3560                 prev_cnt = sp->mc_addr_count;
3561                 sp->mc_addr_count = dev->mc_count;
3562
3563                 /* Clear out the previous list of Mc in the H/W. */
3564                 for (i = 0; i < prev_cnt; i++) {
3565                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3566                                &bar0->rmac_addr_data0_mem);
3567                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3568                                 &bar0->rmac_addr_data1_mem);
3569                         val64 = RMAC_ADDR_CMD_MEM_WE |
3570                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3571                             RMAC_ADDR_CMD_MEM_OFFSET
3572                             (MAC_MC_ADDR_START_OFFSET + i);
3573                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3574
3575                         /* Wait till command completes */
3576                         if (wait_for_cmd_complete(sp)) {
3577                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3578                                           dev->name);
3579                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3580                                 return;
3581                         }
3582                 }
3583
3584                 /* Create the new Rx filter list and update the same in H/W. */
3585                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3586                      i++, mclist = mclist->next) {
3587                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3588                                ETH_ALEN);
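                        /*
                         * Pack the 6-byte multicast address into the low 48
                         * bits of mac_addr, most significant byte first.
                         */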
3589                         for (j = 0; j < ETH_ALEN; j++) {
3590                                 mac_addr |= mclist->dmi_addr[j];
3591                                 mac_addr <<= 8;
3592                         }
3593                         mac_addr >>= 8;
3594                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3595                                &bar0->rmac_addr_data0_mem);
3596                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3597                                 &bar0->rmac_addr_data1_mem);
3598                         val64 = RMAC_ADDR_CMD_MEM_WE |
3599                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3600                             RMAC_ADDR_CMD_MEM_OFFSET
3601                             (i + MAC_MC_ADDR_START_OFFSET);
3602                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3603
3604                         /* Wait till command completes */
3605                         if (wait_for_cmd_complete(sp)) {
3606                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3607                                           dev->name);
3608                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3609                                 return;
3610                         }
3611                 }
3612         }
3613 }
3614
3615 /**
3616  *  s2io_set_mac_addr - Programs the Xframe mac address
3617  *  @dev : pointer to the device structure.
3618  *  @addr: a uchar pointer to the new mac address which is to be set.
3619  *  Description : This procedure will program the Xframe to receive
3620  *  frames with the new MAC address.
3621  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3622  *  as defined in errno.h file on failure.
3623  */
3624
3625 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3626 {
3627         nic_t *sp = dev->priv;
3628         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3629         register u64 val64, mac_addr = 0;
3630         int i;
3631
3632         /*
3633          * Set the new MAC address as the new unicast filter and reflect this
3634          * change on the device address registered with the OS. It will be
3635          * at offset 0.
3636          */
3637         for (i = 0; i < ETH_ALEN; i++) {
3638                 mac_addr <<= 8;
3639                 mac_addr |= addr[i];
3640         }
3641
3642         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3643                &bar0->rmac_addr_data0_mem);
3644
3645         val64 =
3646             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3647             RMAC_ADDR_CMD_MEM_OFFSET(0);
3648         writeq(val64, &bar0->rmac_addr_cmd_mem);
3649         /* Wait till command completes */
3650         if (wait_for_cmd_complete(sp)) {
3651                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3652                 return FAILURE;
3653         }
3654
3655         return SUCCESS;
3656 }
3657
3658 /**
3659  * s2io_ethtool_sset - Sets different link parameters.
3660  * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3661  * @info: pointer to the structure with parameters given by ethtool to set
3662  * link information.
3663  * Description:
3664  * The function sets different link parameters provided by the user onto
3665  * the NIC.
3666  * Return value:
3667  * 0 on success.
3668 */
3669
3670 static int s2io_ethtool_sset(struct net_device *dev,
3671                              struct ethtool_cmd *info)
3672 {
3673         nic_t *sp = dev->priv;
3674         if ((info->autoneg == AUTONEG_ENABLE) ||
3675             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3676                 return -EINVAL;
3677         else {
3678                 s2io_close(sp->dev);
3679                 s2io_open(sp->dev);
3680         }
3681
3682         return 0;
3683 }
3684
3685 /**
3686  * s2io_ethtool_gset - Return link specific information.
3687  * @sp : private member of the device structure, pointer to the
3688  *      s2io_nic structure.
3689  * @info : pointer to the structure with parameters given by ethtool
3690  * to return link information.
3691  * Description:
3692  * Returns link specific information like speed, duplex etc.. to ethtool.
3693  * Return value :
3694  * return 0 on success.
3695  */
3696
3697 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3698 {
3699         nic_t *sp = dev->priv;
3700         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3701         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3702         info->port = PORT_FIBRE;
3703         /* info->transceiver?? TODO */
3704
3705         if (netif_carrier_ok(sp->dev)) {
3706                 info->speed = 10000;
3707                 info->duplex = DUPLEX_FULL;
3708         } else {
3709                 info->speed = -1;
3710                 info->duplex = -1;
3711         }
3712
3713         info->autoneg = AUTONEG_DISABLE;
3714         return 0;
3715 }
3716
3717 /**
3718  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3719  * @sp : private member of the device structure, which is a pointer to the
3720  * s2io_nic structure.
3721  * @info : pointer to the structure with parameters given by ethtool to
3722  * return driver information.
3723  * Description:
3724  * Returns driver specific information like name, version etc. to ethtool.
3725  * Return value:
3726  *  void
3727  */
3728
3729 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3730                                   struct ethtool_drvinfo *info)
3731 {
3732         nic_t *sp = dev->priv;
3733
3734         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3735         strncpy(info->version, s2io_driver_version,
3736                 sizeof(s2io_driver_version));
3737         strncpy(info->fw_version, "", 32);
3738         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3739         info->regdump_len = XENA_REG_SPACE;
3740         info->eedump_len = XENA_EEPROM_SPACE;
3741         info->testinfo_len = S2IO_TEST_LEN;
3742         info->n_stats = S2IO_STAT_LEN;
3743 }
3744
3745 /**
3746  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3747  *  @sp: private member of the device structure, which is a pointer to the
3748  *  s2io_nic structure.
3749  *  @regs : pointer to the structure with parameters given by ethtool for
3750  *  dumping the registers.
3751  *  @reg_space: The input argument into which all the registers are dumped.
3752  *  Description:
3753  *  Dumps the entire register space of xFrame NIC into the user given
3754  *  buffer area.
3755  * Return value :
3756  * void .
3757 */
3758
3759 static void s2io_ethtool_gregs(struct net_device *dev,
3760                                struct ethtool_regs *regs, void *space)
3761 {
3762         int i;
3763         u64 reg;
3764         u8 *reg_space = (u8 *) space;
3765         nic_t *sp = dev->priv;
3766
3767         regs->len = XENA_REG_SPACE;
3768         regs->version = sp->pdev->subsystem_device;
3769
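        /* Copy the entire register space out of BAR0, 64 bits at a time. */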
3770         for (i = 0; i < regs->len; i += 8) {
3771                 reg = readq(sp->bar0 + i);
3772                 memcpy((reg_space + i), &reg, 8);
3773         }
3774 }
3775
3776 /**
3777  *  s2io_phy_id  - timer function that alternates adapter LED.
3778  *  @data : address of the private member of the device structure, which
3779  *  is a pointer to the s2io_nic structure, provided as an u32.
3780  * Description: This is actually the timer function that alternates the
3781  * adapter LED bit of the adapter control register on every
3782  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3783  *  once every second.
3784 */
3785 static void s2io_phy_id(unsigned long data)
3786 {
3787         nic_t *sp = (nic_t *) data;
3788         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3789         u64 val64 = 0;
3790         u16 subid;
3791
3792         subid = sp->pdev->subsystem_device;
3793         if ((sp->device_type == XFRAME_II_DEVICE) ||
3794                    ((subid & 0xFF) >= 0x07)) {
3795                 val64 = readq(&bar0->gpio_control);
3796                 val64 ^= GPIO_CTRL_GPIO_0;
3797                 writeq(val64, &bar0->gpio_control);
3798         } else {
3799                 val64 = readq(&bar0->adapter_control);
3800                 val64 ^= ADAPTER_LED_ON;
3801                 writeq(val64, &bar0->adapter_control);
3802         }
3803
3804         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3805 }
3806
3807 /**
3808  * s2io_ethtool_idnic - To physically identify the nic on the system.
3809  * @sp : private member of the device structure, which is a pointer to the
3810  * s2io_nic structure.
3811  * @id : pointer to the structure with identification parameters given by
3812  * ethtool.
3813  * Description: Used to physically identify the NIC on the system.
3814  * The Link LED will blink for a time specified by the user for
3815  * identification.
3816  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3817  * identification is possible only if its link is up.
3818  * Return value:
3819  * int , returns 0 on success
3820  */
3821
3822 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3823 {
3824         u64 val64 = 0, last_gpio_ctrl_val;
3825         nic_t *sp = dev->priv;
3826         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3827         u16 subid;
3828
3829         subid = sp->pdev->subsystem_device;
3830         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3831         if ((sp->device_type == XFRAME_I_DEVICE) &&
3832                 ((subid & 0xFF) < 0x07)) {
3833                 val64 = readq(&bar0->adapter_control);
3834                 if (!(val64 & ADAPTER_CNTL_EN)) {
3835                         printk(KERN_ERR
3836                                "Adapter Link down, cannot blink LED\n");
3837                         return -EFAULT;
3838                 }
3839         }
3840         if (sp->id_timer.function == NULL) {
3841                 init_timer(&sp->id_timer);
3842                 sp->id_timer.function = s2io_phy_id;
3843                 sp->id_timer.data = (unsigned long) sp;
3844         }
3845         mod_timer(&sp->id_timer, jiffies);
3846         if (data)
3847                 msleep_interruptible(data * HZ);
3848         else
3849                 msleep_interruptible(MAX_FLICKER_TIME);
3850         del_timer_sync(&sp->id_timer);
3851
3852         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3853                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3854                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3855         }
3856
3857         return 0;
3858 }
3859
3860 /**
3861  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3862  * @sp : private member of the device structure, which is a pointer to the
3863  *      s2io_nic structure.
3864  * @ep : pointer to the structure with pause parameters given by ethtool.
3865  * Description:
3866  * Returns the Pause frame generation and reception capability of the NIC.
3867  * Return value:
3868  *  void
3869  */
3870 static void s2io_ethtool_getpause_data(struct net_device *dev,
3871                                        struct ethtool_pauseparam *ep)
3872 {
3873         u64 val64;
3874         nic_t *sp = dev->priv;
3875         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3876
3877         val64 = readq(&bar0->rmac_pause_cfg);
3878         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3879                 ep->tx_pause = TRUE;
3880         if (val64 & RMAC_PAUSE_RX_ENABLE)
3881                 ep->rx_pause = TRUE;
3882         ep->autoneg = FALSE;
3883 }
3884
3885 /**
3886  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3887  * @sp : private member of the device structure, which is a pointer to the
3888  *      s2io_nic structure.
3889  * @ep : pointer to the structure with pause parameters given by ethtool.
3890  * Description:
3891  * It can be used to set or reset Pause frame generation or reception
3892  * support of the NIC.
3893  * Return value:
3894  * int, returns 0 on Success
3895  */
3896
3897 static int s2io_ethtool_setpause_data(struct net_device *dev,
3898                                struct ethtool_pauseparam *ep)
3899 {
3900         u64 val64;
3901         nic_t *sp = dev->priv;
3902         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3903
3904         val64 = readq(&bar0->rmac_pause_cfg);
3905         if (ep->tx_pause)
3906                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3907         else
3908                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3909         if (ep->rx_pause)
3910                 val64 |= RMAC_PAUSE_RX_ENABLE;
3911         else
3912                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3913         writeq(val64, &bar0->rmac_pause_cfg);
3914         return 0;
3915 }
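
/*
 * Usage sketch (not part of the driver): the two handlers above implement
 * the ethtool pause-parameter interface, e.g.
 *
 *     ethtool -a eth0                 # query the RMAC pause configuration
 *     ethtool -A eth0 rx on tx off    # toggle RMAC_PAUSE_RX/GEN_ENABLE
 *
 * Pause autonegotiation is not supported, hence ep->autoneg is always
 * reported as FALSE by s2io_ethtool_getpause_data().
 */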
3916
3917 /**
3918  * read_eeprom - reads 4 bytes of data from user given offset.
3919  * @sp : private member of the device structure, which is a pointer to the
3920  *      s2io_nic structure.
3921  * @off : offset from which the data is to be read
3922  * @data : output parameter in which the data read at the given
3923  *      offset is stored.
3924  * Description:
3925  * Will read 4 bytes of data from the user given offset and return the
3926  * read data.
3927  * NOTE: Only the part of the EEPROM visible through the I2C bus can
3928  *   be read.
3929  * Return value:
3930  *  -1 on failure and 0 on success.
3931  */
3932
3933 #define S2IO_DEV_ID             5
3934 static int read_eeprom(nic_t * sp, int off, u32 * data)
3935 {
3936         int ret = -1;
3937         u32 exit_cnt = 0;
3938         u64 val64;
3939         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3940
3941         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3942             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3943             I2C_CONTROL_CNTL_START;
3944         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3945
3946         while (exit_cnt < 5) {
3947                 val64 = readq(&bar0->i2c_control);
3948                 if (I2C_CONTROL_CNTL_END(val64)) {
3949                         *data = I2C_CONTROL_GET_DATA(val64);
3950                         ret = 0;
3951                         break;
3952                 }
3953                 msleep(50);
3954                 exit_cnt++;
3955         }
3956
3957         return ret;
3958 }
3959
3960 /**
3961  *  write_eeprom - actually writes the relevant part of the data value.
3962  *  @sp : private member of the device structure, which is a pointer to the
3963  *       s2io_nic structure.
3964  *  @off : offset at which the data must be written
3965  *  @data : The data that is to be written
3966  *  @cnt : Number of bytes of the data that are actually to be written into
3967  *  the Eeprom. (max of 3)
3968  * Description:
3969  *  Actually writes the relevant part of the data value into the Eeprom
3970  *  through the I2C bus.
3971  * Return value:
3972  *  0 on success, -1 on failure.
3973  */
3974
3975 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3976 {
3977         int exit_cnt = 0, ret = -1;
3978         u64 val64;
3979         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3980
3981         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3982             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3983             I2C_CONTROL_CNTL_START;
3984         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3985
3986         while (exit_cnt < 5) {
3987                 val64 = readq(&bar0->i2c_control);
3988                 if (I2C_CONTROL_CNTL_END(val64)) {
3989                         if (!(val64 & I2C_CONTROL_NACK))
3990                                 ret = 0;
3991                         break;
3992                 }
3993                 msleep(50);
3994                 exit_cnt++;
3995         }
3996
3997         return ret;
3998 }
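
/*
 * Illustrative helper (an assumption, not present in the driver): the
 * read_eeprom()/write_eeprom() pair is typically used together in a
 * write-then-verify fashion, as the EEPROM self-test further below does:
 *
 *     static int eeprom_write_verify(nic_t *sp, int off, u32 val)
 *     {
 *             u32 readback;
 *
 *             if (write_eeprom(sp, off, val, 3))
 *                     return -1;
 *             if (read_eeprom(sp, off, &readback))
 *                     return -1;
 *             return (readback == val) ? 0 : -1;
 *     }
 */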
3999
4000 /**
4001  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4002  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4003  *  @eeprom : pointer to the user level structure provided by ethtool,
4004  *  containing all relevant information.
4005  *  @data_buf : buffer into which the data read from the EEPROM is copied.
4006  *  Description: Reads the values stored in the Eeprom at given offset
4007  *  for a given length. Stores these values in the input argument data
4008  *  buffer 'data_buf' and returns these to the caller (ethtool).
4009  *  Return value:
4010  *  int  0 on success
4011  */
4012
4013 static int s2io_ethtool_geeprom(struct net_device *dev,
4014                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4015 {
4016         u32 data, i, valid;
4017         nic_t *sp = dev->priv;
4018
4019         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4020
4021         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4022                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4023
4024         for (i = 0; i < eeprom->len; i += 4) {
4025                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4026                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4027                         return -EFAULT;
4028                 }
4029                 valid = INV(data);
4030                 memcpy((data_buf + i), &valid, 4);
4031         }
4032         return 0;
4033 }
4034
4035 /**
4036  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4037  *  @sp : private member of the device structure, which is a pointer to the
4038  *  s2io_nic structure.
4039  *  @eeprom : pointer to the user level structure provided by ethtool,
4040  *  containing all relevant information.
4041  *  @data_buf : user defined value to be written into Eeprom.
4042  *  Description:
4043  *  Tries to write the user provided value in the Eeprom, at the offset
4044  *  given by the user.
4045  *  Return value:
4046  *  0 on success, -EFAULT on failure.
4047  */
4048
4049 static int s2io_ethtool_seeprom(struct net_device *dev,
4050                                 struct ethtool_eeprom *eeprom,
4051                                 u8 * data_buf)
4052 {
4053         int len = eeprom->len, cnt = 0;
4054         u32 valid = 0, data;
4055         nic_t *sp = dev->priv;
4056
4057         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4058                 DBG_PRINT(ERR_DBG,
4059                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4060                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4061                           eeprom->magic);
4062                 return -EFAULT;
4063         }
4064
4065         while (len) {
4066                 data = (u32) data_buf[cnt] & 0x000000FF;
4067                 if (data) {
4068                         valid = (u32) (data << 24);
4069                 } else
4070                         valid = data;
4071
4072                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4073                         DBG_PRINT(ERR_DBG,
4074                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4075                         DBG_PRINT(ERR_DBG,
4076                                   "write into the specified offset\n");
4077                         return -EFAULT;
4078                 }
4079                 cnt++;
4080                 len--;
4081         }
4082
4083         return 0;
4084 }
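
/*
 * Usage sketch (not part of the driver): the EEPROM is exposed through the
 * standard ethtool EEPROM interface. The magic expected by
 * s2io_ethtool_seeprom() is the PCI vendor ID in the low 16 bits and the
 * device ID in the upper 16 bits, exactly as reported by
 * s2io_ethtool_geeprom(), e.g.
 *
 *     ethtool -e eth0 offset 0 length 16        # dump the first 16 bytes
 *     ethtool -E eth0 magic 0x<dev><vendor> offset 0x4f0 value 0x12
 */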
4085
4086 /**
4087  * s2io_register_test - reads and writes into all clock domains.
4088  * @sp : private member of the device structure, which is a pointer to the
4089  * s2io_nic structure.
4090  * @data : variable that returns the result of each of the tests conducted
4091  * by the driver.
4092  * Description:
4093  * Read and write into all clock domains. The NIC has 3 clock domains,
4094  * so the test checks that registers in all three regions are accessible.
4095  * Return value:
4096  * 0 on success.
4097  */
4098
4099 static int s2io_register_test(nic_t * sp, uint64_t * data)
4100 {
4101         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4102         u64 val64 = 0;
4103         int fail = 0;
4104
4105         val64 = readq(&bar0->pif_rd_swapper_fb);
4106         if (val64 != 0x123456789abcdefULL) {
4107                 fail = 1;
4108                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4109         }
4110
4111         val64 = readq(&bar0->rmac_pause_cfg);
4112         if (val64 != 0xc000ffff00000000ULL) {
4113                 fail = 1;
4114                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4115         }
4116
4117         val64 = readq(&bar0->rx_queue_cfg);
4118         if (val64 != 0x0808080808080808ULL) {
4119                 fail = 1;
4120                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4121         }
4122
4123         val64 = readq(&bar0->xgxs_efifo_cfg);
4124         if (val64 != 0x000000001923141EULL) {
4125                 fail = 1;
4126                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4127         }
4128
4129         val64 = 0x5A5A5A5A5A5A5A5AULL;
4130         writeq(val64, &bar0->xmsi_data);
4131         val64 = readq(&bar0->xmsi_data);
4132         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4133                 fail = 1;
4134                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4135         }
4136
4137         val64 = 0xA5A5A5A5A5A5A5A5ULL;
4138         writeq(val64, &bar0->xmsi_data);
4139         val64 = readq(&bar0->xmsi_data);
4140         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4141                 fail = 1;
4142                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4143         }
4144
4145         *data = fail;
4146         return 0;
4147 }
4148
4149 /**
4150  * s2io_eeprom_test - verifies that the EEPROM in the Xena can be programmed.
4151  * @sp : private member of the device structure, which is a pointer to the
4152  * s2io_nic structure.
4153  * @data:variable that returns the result of each of the test conducted by
4154  * the driver.
4155  * Description:
4156  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4157  * register.
4158  * Return value:
4159  * 0 on success.
4160  */
4161
4162 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4163 {
4164         int fail = 0;
4165         u32 ret_data;
4166
4167         /* Test Write Error at offset 0 */
4168         if (!write_eeprom(sp, 0, 0, 3))
4169                 fail = 1;
4170
4171         /* Test Write at offset 4f0 */
4172         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4173                 fail = 1;
4174         if (read_eeprom(sp, 0x4F0, &ret_data))
4175                 fail = 1;
4176
4177         if (ret_data != 0x01234567)
4178                 fail = 1;
4179
4180         /* Reset the EEPROM data to FFFF */
4181         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4182
4183         /* Test Write Request Error at offset 0x7c */
4184         if (!write_eeprom(sp, 0x07C, 0, 3))
4185                 fail = 1;
4186
4187         /* Test Write Request at offset 0x7fc */
4188         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4189                 fail = 1;
4190         if (read_eeprom(sp, 0x7FC, &ret_data))
4191                 fail = 1;
4192
4193         if (ret_data != 0x01234567)
4194                 fail = 1;
4195
4196         /* Reset the EEPROM data go FFFF */
4197         /* Reset the EEPROM data to FFFF */
4198
4199         /* Test Write Error at offset 0x80 */
4200         if (!write_eeprom(sp, 0x080, 0, 3))
4201                 fail = 1;
4202
4203         /* Test Write Error at offset 0xfc */
4204         if (!write_eeprom(sp, 0x0FC, 0, 3))
4205                 fail = 1;
4206
4207         /* Test Write Error at offset 0x100 */
4208         if (!write_eeprom(sp, 0x100, 0, 3))
4209                 fail = 1;
4210
4211         /* Test Write Error at offset 4ec */
4212         if (!write_eeprom(sp, 0x4EC, 0, 3))
4213                 fail = 1;
4214
4215         *data = fail;
4216         return 0;
4217 }
4218
4219 /**
4220  * s2io_bist_test - invokes the MemBist test of the card.
4221  * @sp : private member of the device structure, which is a pointer to the
4222  * s2io_nic structure.
4223  * @data:variable that returns the result of each of the test conducted by
4224  * the driver.
4225  * Description:
4226  * This invokes the MemBist test of the card. We give around
4227  * 2 secs time for the Test to complete. If it's still not complete
4228  * 2 seconds for the test to complete. If it is still not complete
4229  * within this period, we consider that the test failed.
4230  * 0 on success and -1 on failure.
4231  */
4232
4233 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4234 {
4235         u8 bist = 0;
4236         int cnt = 0, ret = -1;
4237
4238         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4239         bist |= PCI_BIST_START;
4240         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4241
4242         while (cnt < 20) {
4243                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4244                 if (!(bist & PCI_BIST_START)) {
4245                         *data = (bist & PCI_BIST_CODE_MASK);
4246                         ret = 0;
4247                         break;
4248                 }
4249                 msleep(100);
4250                 cnt++;
4251         }
4252
4253         return ret;
4254 }
4255
4256 /**
4257  * s2io_link_test - verifies the link state of the nic
4258  * @sp : private member of the device structure, which is a pointer to the
4259  * s2io_nic structure.
4260  * @data: variable that returns the result of each of the test conducted by
4261  * the driver.
4262  * Description:
4263  * The function verifies the link state of the NIC and updates the input
4264  * argument 'data' appropriately.
4265  * Return value:
4266  * 0 on success.
4267  */
4268
4269 static int s2io_link_test(nic_t * sp, uint64_t * data)
4270 {
4271         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4272         u64 val64;
4273
4274         val64 = readq(&bar0->adapter_status);
4275         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4276                 *data = 1;
4277
4278         return 0;
4279 }
4280
4281 /**
4282  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4283  * @sp : private member of the device structure, which is a pointer to the
4284  * s2io_nic structure.
4285  * @data : variable that returns the result of each of the test
4286  * conducted by the driver.
4287  * Description:
4288  *  This is one of the offline tests; it verifies the read and write
4289  *  access to the RldRam chip on the NIC.
4290  * Return value:
4291  *  0 on success.
4292  */
4293
4294 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4295 {
4296         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4297         u64 val64;
4298         int cnt, iteration = 0, test_pass = 0;
4299
4300         val64 = readq(&bar0->adapter_control);
4301         val64 &= ~ADAPTER_ECC_EN;
4302         writeq(val64, &bar0->adapter_control);
4303
4304         val64 = readq(&bar0->mc_rldram_test_ctrl);
4305         val64 |= MC_RLDRAM_TEST_MODE;
4306         writeq(val64, &bar0->mc_rldram_test_ctrl);
4307
4308         val64 = readq(&bar0->mc_rldram_mrs);
4309         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4310         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4311
4312         val64 |= MC_RLDRAM_MRS_ENABLE;
4313         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4314
4315         while (iteration < 2) {
4316                 val64 = 0x55555555aaaa0000ULL;
4317                 if (iteration == 1) {
4318                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4319                 }
4320                 writeq(val64, &bar0->mc_rldram_test_d0);
4321
4322                 val64 = 0xaaaa5a5555550000ULL;
4323                 if (iteration == 1) {
4324                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4325                 }
4326                 writeq(val64, &bar0->mc_rldram_test_d1);
4327
4328                 val64 = 0x55aaaaaaaa5a0000ULL;
4329                 if (iteration == 1) {
4330                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4331                 }
4332                 writeq(val64, &bar0->mc_rldram_test_d2);
4333
4334                 val64 = (u64) (0x0000003fffff0000ULL);
4335                 writeq(val64, &bar0->mc_rldram_test_add);
4336
4337
4338                 val64 = MC_RLDRAM_TEST_MODE;
4339                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4340
4341                 val64 |=
4342                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4343                     MC_RLDRAM_TEST_GO;
4344                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4345
4346                 for (cnt = 0; cnt < 5; cnt++) {
4347                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4348                         if (val64 & MC_RLDRAM_TEST_DONE)
4349                                 break;
4350                         msleep(200);
4351                 }
4352
4353                 if (cnt == 5)
4354                         break;
4355
4356                 val64 = MC_RLDRAM_TEST_MODE;
4357                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4358
4359                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4360                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4361
4362                 for (cnt = 0; cnt < 5; cnt++) {
4363                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4364                         if (val64 & MC_RLDRAM_TEST_DONE)
4365                                 break;
4366                         msleep(500);
4367                 }
4368
4369                 if (cnt == 5)
4370                         break;
4371
4372                 val64 = readq(&bar0->mc_rldram_test_ctrl);
4373                 if (val64 & MC_RLDRAM_TEST_PASS)
4374                         test_pass = 1;
4375
4376                 iteration++;
4377         }
4378
4379         if (!test_pass)
4380                 *data = 1;
4381         else
4382                 *data = 0;
4383
4384         return 0;
4385 }
4386
4387 /**
4388  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4389  *  @sp : private member of the device structure, which is a pointer to the
4390  *  s2io_nic structure.
4391  *  @ethtest : pointer to a ethtool command specific structure that will be
4392  *  returned to the user.
4393  *  @data : variable that returns the result of each of the test
4394  * conducted by the driver.
4395  * Description:
4396  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4397  *  the health of the card.
4398  * Return value:
4399  *  void
4400  */
4401
4402 static void s2io_ethtool_test(struct net_device *dev,
4403                               struct ethtool_test *ethtest,
4404                               uint64_t * data)
4405 {
4406         nic_t *sp = dev->priv;
4407         int orig_state = netif_running(sp->dev);
4408
4409         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4410                 /* Offline Tests. */
4411                 if (orig_state)
4412                         s2io_close(sp->dev);
4413
4414                 if (s2io_register_test(sp, &data[0]))
4415                         ethtest->flags |= ETH_TEST_FL_FAILED;
4416
4417                 s2io_reset(sp);
4418
4419                 if (s2io_rldram_test(sp, &data[3]))
4420                         ethtest->flags |= ETH_TEST_FL_FAILED;
4421
4422                 s2io_reset(sp);
4423
4424                 if (s2io_eeprom_test(sp, &data[1]))
4425                         ethtest->flags |= ETH_TEST_FL_FAILED;
4426
4427                 if (s2io_bist_test(sp, &data[4]))
4428                         ethtest->flags |= ETH_TEST_FL_FAILED;
4429
4430                 if (orig_state)
4431                         s2io_open(sp->dev);
4432
4433                 data[2] = 0;
4434         } else {
4435                 /* Online Tests. */
4436                 if (!orig_state) {
4437                         DBG_PRINT(ERR_DBG,
4438                                   "%s: is not up, cannot run test\n",
4439                                   dev->name);
4440                         data[0] = -1;
4441                         data[1] = -1;
4442                         data[2] = -1;
4443                         data[3] = -1;
4444                         data[4] = -1;
4445                 }
4446
4447                 if (s2io_link_test(sp, &data[2]))
4448                         ethtest->flags |= ETH_TEST_FL_FAILED;
4449
4450                 data[0] = 0;
4451                 data[1] = 0;
4452                 data[3] = 0;
4453                 data[4] = 0;
4454         }
4455 }
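
/*
 * Usage sketch (not part of the driver): this is wired up as the ethtool
 * self-test entry point, e.g.
 *
 *     ethtool -t eth0 offline
 *
 * The result slots filled in above are data[0] register test, data[1]
 * EEPROM test, data[2] link test, data[3] RLDRAM test and data[4] BIST.
 * For the offline run the driver closes and reopens the interface around
 * the tests if it was up; the online run only performs the link test.
 */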
4456
4457 static void s2io_get_ethtool_stats(struct net_device *dev,
4458                                    struct ethtool_stats *estats,
4459                                    u64 * tmp_stats)
4460 {
4461         int i = 0;
4462         nic_t *sp = dev->priv;
4463         StatInfo_t *stat_info = sp->mac_control.stats_info;
4464
4465         s2io_updt_stats(sp);
4466         tmp_stats[i++] =
4467                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
4468                 le32_to_cpu(stat_info->tmac_frms);
4469         tmp_stats[i++] =
4470                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4471                 le32_to_cpu(stat_info->tmac_data_octets);
4472         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4473         tmp_stats[i++] =
4474                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4475                 le32_to_cpu(stat_info->tmac_mcst_frms);
4476         tmp_stats[i++] =
4477                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4478                 le32_to_cpu(stat_info->tmac_bcst_frms);
4479         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4480         tmp_stats[i++] =
4481                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4482                 le32_to_cpu(stat_info->tmac_any_err_frms);
4483         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4484         tmp_stats[i++] =
4485                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4486                 le32_to_cpu(stat_info->tmac_vld_ip);
4487         tmp_stats[i++] =
4488                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4489                 le32_to_cpu(stat_info->tmac_drop_ip);
4490         tmp_stats[i++] =
4491                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4492                 le32_to_cpu(stat_info->tmac_icmp);
4493         tmp_stats[i++] =
4494                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4495                 le32_to_cpu(stat_info->tmac_rst_tcp);
4496         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4497         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4498                 le32_to_cpu(stat_info->tmac_udp);
4499         tmp_stats[i++] =
4500                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4501                 le32_to_cpu(stat_info->rmac_vld_frms);
4502         tmp_stats[i++] =
4503                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4504                 le32_to_cpu(stat_info->rmac_data_octets);
4505         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4506         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4507         tmp_stats[i++] =
4508                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4509                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4510         tmp_stats[i++] =
4511                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4512                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4513         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4514         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4515         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4516         tmp_stats[i++] =
4517                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4518                 le32_to_cpu(stat_info->rmac_discarded_frms);
4519         tmp_stats[i++] =
4520                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4521                 le32_to_cpu(stat_info->rmac_usized_frms);
4522         tmp_stats[i++] =
4523                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4524                 le32_to_cpu(stat_info->rmac_osized_frms);
4525         tmp_stats[i++] =
4526                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4527                 le32_to_cpu(stat_info->rmac_frag_frms);
4528         tmp_stats[i++] =
4529                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4530                 le32_to_cpu(stat_info->rmac_jabber_frms);
4531         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4532                 le32_to_cpu(stat_info->rmac_ip);
4533         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4534         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4535         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4536                 le32_to_cpu(stat_info->rmac_drop_ip);
4537         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4538                 le32_to_cpu(stat_info->rmac_icmp);
4539         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4540         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4541                 le32_to_cpu(stat_info->rmac_udp);
4542         tmp_stats[i++] =
4543                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4544                 le32_to_cpu(stat_info->rmac_err_drp_udp);
4545         tmp_stats[i++] =
4546                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4547                 le32_to_cpu(stat_info->rmac_pause_cnt);
4548         tmp_stats[i++] =
4549                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4550                 le32_to_cpu(stat_info->rmac_accepted_ip);
4551         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4552         tmp_stats[i++] = 0;
4553         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4554         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4555 }
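
/*
 * Note on the pattern above: most hardware counters are 64-bit quantities
 * stored as two little-endian 32-bit words (an overflow word plus a low
 * word). A helper macro along these lines (an assumption, not defined in
 * the driver) captures the repeated expression:
 *
 *     #define S2IO_STAT64(hi, lo) \
 *             (((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo))
 *
 *     tmp_stats[i++] = S2IO_STAT64(stat_info->tmac_frms_oflow,
 *                                  stat_info->tmac_frms);
 */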
4556
4557 int s2io_ethtool_get_regs_len(struct net_device *dev)
4558 {
4559         return (XENA_REG_SPACE);
4560 }
4561
4562
4563 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4564 {
4565         nic_t *sp = dev->priv;
4566
4567         return (sp->rx_csum);
4568 }
4569 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4570 {
4571         nic_t *sp = dev->priv;
4572
4573         if (data)
4574                 sp->rx_csum = 1;
4575         else
4576                 sp->rx_csum = 0;
4577
4578         return 0;
4579 }
4580 int s2io_get_eeprom_len(struct net_device *dev)
4581 {
4582         return (XENA_EEPROM_SPACE);
4583 }
4584
4585 int s2io_ethtool_self_test_count(struct net_device *dev)
4586 {
4587         return (S2IO_TEST_LEN);
4588 }
4589 void s2io_ethtool_get_strings(struct net_device *dev,
4590                               u32 stringset, u8 * data)
4591 {
4592         switch (stringset) {
4593         case ETH_SS_TEST:
4594                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4595                 break;
4596         case ETH_SS_STATS:
4597                 memcpy(data, &ethtool_stats_keys,
4598                        sizeof(ethtool_stats_keys));
4599         }
4600 }
4601 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4602 {
4603         return (S2IO_STAT_LEN);
4604 }
4605
4606 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4607 {
4608         if (data)
4609                 dev->features |= NETIF_F_IP_CSUM;
4610         else
4611                 dev->features &= ~NETIF_F_IP_CSUM;
4612
4613         return 0;
4614 }
4615
4616
4617 static struct ethtool_ops netdev_ethtool_ops = {
4618         .get_settings = s2io_ethtool_gset,
4619         .set_settings = s2io_ethtool_sset,
4620         .get_drvinfo = s2io_ethtool_gdrvinfo,
4621         .get_regs_len = s2io_ethtool_get_regs_len,
4622         .get_regs = s2io_ethtool_gregs,
4623         .get_link = ethtool_op_get_link,
4624         .get_eeprom_len = s2io_get_eeprom_len,
4625         .get_eeprom = s2io_ethtool_geeprom,
4626         .set_eeprom = s2io_ethtool_seeprom,
4627         .get_pauseparam = s2io_ethtool_getpause_data,
4628         .set_pauseparam = s2io_ethtool_setpause_data,
4629         .get_rx_csum = s2io_ethtool_get_rx_csum,
4630         .set_rx_csum = s2io_ethtool_set_rx_csum,
4631         .get_tx_csum = ethtool_op_get_tx_csum,
4632         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4633         .get_sg = ethtool_op_get_sg,
4634         .set_sg = ethtool_op_set_sg,
4635 #ifdef NETIF_F_TSO
4636         .get_tso = ethtool_op_get_tso,
4637         .set_tso = ethtool_op_set_tso,
4638 #endif
4639         .self_test_count = s2io_ethtool_self_test_count,
4640         .self_test = s2io_ethtool_test,
4641         .get_strings = s2io_ethtool_get_strings,
4642         .phys_id = s2io_ethtool_idnic,
4643         .get_stats_count = s2io_ethtool_get_stats_count,
4644         .get_ethtool_stats = s2io_get_ethtool_stats
4645 };
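
/*
 * Registration sketch (an assumption about code outside this excerpt): the
 * ops table above only takes effect once it is attached to the net_device,
 * typically during probe with something like:
 *
 *     SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 *
 * after which "ethtool eth0", "ethtool -S eth0", etc. are serviced by the
 * handlers listed here.
 */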
4646
4647 /**
4648  *  s2io_ioctl - Entry point for the Ioctl
4649  *  @dev :  Device pointer.
4650  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
4651  *  @ifr :  An IOCTL specific structure that can contain a pointer to
4652  *  @cmd :  This is used to distinguish between the different commands that
4653  *  can be passed to the IOCTL functions.
4654  *  Description:
4655  *  Currently there is no special functionality supported in IOCTL, hence
4656  *  the function always returns -EOPNOTSUPP.
4657  */
4658
4659 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4660 {
4661         return -EOPNOTSUPP;
4662 }
4663
4664 /**
4665  *  s2io_change_mtu - entry point to change MTU size for the device.
4666  *   @dev : device pointer.
4667  *   @new_mtu : the new MTU size for the device.
4668  *   Description: A driver entry point to change MTU size for the device.
4669  *   Before changing the MTU the device must be stopped.
4670  *  Return value:
4671  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4672  *   file on failure.
4673  */
4674
4675 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4676 {
4677         nic_t *sp = dev->priv;
4678
4679         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4680                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4681                           dev->name);
4682                 return -EPERM;
4683         }
4684
4685         dev->mtu = new_mtu;
4686         if (netif_running(dev)) {
4687                 s2io_card_down(sp);
4688                 netif_stop_queue(dev);
4689                 if (s2io_card_up(sp)) {
4690                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4691                                   __FUNCTION__);
4692                 }
4693                 if (netif_queue_stopped(dev))
4694                         netif_wake_queue(dev);
4695         } else { /* Device is down */
4696                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4697                 u64 val64 = new_mtu;
4698
4699                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4700         }
4701
4702         return 0;
4703 }
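
/*
 * Usage sketch (not part of the driver): the MTU change entry point is
 * invoked through the normal netdevice path, e.g.
 *
 *     ip link set dev eth0 mtu 9000
 *
 * If the interface is running, the card is brought down and back up so the
 * new size takes effect; otherwise only rmac_max_pyld_len is reprogrammed.
 */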
4704
4705 /**
4706  *  s2io_tasklet - Bottom half of the ISR.
4707  *  @dev_addr : address of the net_device structure, passed as an unsigned long.
4708  *  Description:
4709  *  This is the tasklet or the bottom half of the ISR. This is
4710  *  an extension of the ISR which is scheduled by the scheduler to be run
4711  *  when the load on the CPU is low. All low priority tasks of the ISR can
4712  *  be pushed into the tasklet. For now the tasklet is used only to
4713  *  replenish the Rx buffers in the Rx buffer descriptors.
4714  *  Return value:
4715  *  void.
4716  */
4717
4718 static void s2io_tasklet(unsigned long dev_addr)
4719 {
4720         struct net_device *dev = (struct net_device *) dev_addr;
4721         nic_t *sp = dev->priv;
4722         int i, ret;
4723         mac_info_t *mac_control;
4724         struct config_param *config;
4725
4726         mac_control = &sp->mac_control;
4727         config = &sp->config;
4728
4729         if (!TASKLET_IN_USE) {
4730                 for (i = 0; i < config->rx_ring_num; i++) {
4731                         ret = fill_rx_buffers(sp, i);
4732                         if (ret == -ENOMEM) {
4733                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4734                                           dev->name);
4735                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4736                                 break;
4737                         } else if (ret == -EFILL) {
4738                                 DBG_PRINT(ERR_DBG,
4739                                           "%s: Rx Ring %d is full\n",
4740                                           dev->name, i);
4741                                 break;
4742                         }
4743                 }
4744                 clear_bit(0, (&sp->tasklet_status));
4745         }
4746 }
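
/*
 * Scheduling sketch (an assumption about code outside this excerpt): the
 * tasklet initialized in s2io_card_up() is expected to be queued from the
 * interrupt path when Rx buffer replenishment cannot be done inline, e.g.
 *
 *     tasklet_schedule(&sp->task);
 *
 * after which s2io_tasklet() refills the rings at softirq time.
 */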
4747
4748 /**
4749  * s2io_set_link - Set the link status
4750  * @data: long pointer to the device private structure
4751  * Description: Sets the link status for the adapter
4752  */
4753
4754 static void s2io_set_link(unsigned long data)
4755 {
4756         nic_t *nic = (nic_t *) data;
4757         struct net_device *dev = nic->dev;
4758         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4759         register u64 val64;
4760         u16 subid;
4761
4762         if (test_and_set_bit(0, &(nic->link_state))) {
4763                 /* The card is being reset, no point doing anything */
4764                 return;
4765         }
4766
4767         subid = nic->pdev->subsystem_device;
4768         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4769                 /*
4770                  * Allow a small delay for the NIC's self-initiated
4771                  * cleanup to complete.
4772                  */
4773                 msleep(100);
4774         }
4775
4776         val64 = readq(&bar0->adapter_status);
4777         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4778                 if (LINK_IS_UP(val64)) {
4779                         val64 = readq(&bar0->adapter_control);
4780                         val64 |= ADAPTER_CNTL_EN;
4781                         writeq(val64, &bar0->adapter_control);
4782                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4783                                                              subid)) {
4784                                 val64 = readq(&bar0->gpio_control);
4785                                 val64 |= GPIO_CTRL_GPIO_0;
4786                                 writeq(val64, &bar0->gpio_control);
4787                                 val64 = readq(&bar0->gpio_control);
4788                         } else {
4789                                 val64 |= ADAPTER_LED_ON;
4790                                 writeq(val64, &bar0->adapter_control);
4791                         }
4792                         if (s2io_link_fault_indication(nic) ==
4793                                                 MAC_RMAC_ERR_TIMER) {
4794                                 val64 = readq(&bar0->adapter_status);
4795                                 if (!LINK_IS_UP(val64)) {
4796                                         DBG_PRINT(ERR_DBG, "%s:", dev->name);
4797                                         DBG_PRINT(ERR_DBG, " Link down ");
4798                                         DBG_PRINT(ERR_DBG, "after ");
4799                                         DBG_PRINT(ERR_DBG, "enabling ");
4800                                         DBG_PRINT(ERR_DBG, "device \n");
4801                                 }
4802                         }
4803                         if (nic->device_enabled_once == FALSE) {
4804                                 nic->device_enabled_once = TRUE;
4805                         }
4806                         s2io_link(nic, LINK_UP);
4807                 } else {
4808                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4809                                                               subid)) {
4810                                 val64 = readq(&bar0->gpio_control);
4811                                 val64 &= ~GPIO_CTRL_GPIO_0;
4812                                 writeq(val64, &bar0->gpio_control);
4813                                 val64 = readq(&bar0->gpio_control);
4814                         }
4815                         s2io_link(nic, LINK_DOWN);
4816                 }
4817         } else {                /* NIC is not Quiescent. */
4818                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4819                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4820                 netif_stop_queue(dev);
4821         }
4822         clear_bit(0, &(nic->link_state));
4823 }
4824
4825 static void s2io_card_down(nic_t * sp)
4826 {
4827         int cnt = 0;
4828         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4829         unsigned long flags;
4830         register u64 val64 = 0;
4831
4832         del_timer_sync(&sp->alarm_timer);
4833         /* If s2io_set_link task is executing, wait till it completes. */
4834         while (test_and_set_bit(0, &(sp->link_state))) {
4835                 msleep(50);
4836         }
4837         atomic_set(&sp->card_state, CARD_DOWN);
4838
4839         /* disable Tx and Rx traffic on the NIC */
4840         stop_nic(sp);
4841
4842         /* Kill tasklet. */
4843         tasklet_kill(&sp->task);
4844
4845         /* Check if the device is Quiescent and then Reset the NIC */
4846         do {
4847                 val64 = readq(&bar0->adapter_status);
4848                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4849                         break;
4850                 }
4851
4852                 msleep(50);
4853                 cnt++;
4854                 if (cnt == 10) {
4855                         DBG_PRINT(ERR_DBG,
4856                                   "s2io_close:Device not Quiescent ");
4857                 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4858                                   (unsigned long long) val64);
4859                         break;
4860                 }
4861         } while (1);
4862         s2io_reset(sp);
4863
4864         /* Waiting till all Interrupt handlers are complete */
4865         cnt = 0;
4866         do {
4867                 msleep(10);
4868                 if (!atomic_read(&sp->isr_cnt))
4869                         break;
4870                 cnt++;
4871         } while(cnt < 5);
4872
4873         spin_lock_irqsave(&sp->tx_lock, flags);
4874         /* Free all Tx buffers */
4875         free_tx_buffers(sp);
4876         spin_unlock_irqrestore(&sp->tx_lock, flags);
4877
4878         /* Free all Rx buffers */
4879         spin_lock_irqsave(&sp->rx_lock, flags);
4880         free_rx_buffers(sp);
4881         spin_unlock_irqrestore(&sp->rx_lock, flags);
4882
4883         clear_bit(0, &(sp->link_state));
4884 }
4885
4886 static int s2io_card_up(nic_t * sp)
4887 {
4888         int i, ret;
4889         mac_info_t *mac_control;
4890         struct config_param *config;
4891         struct net_device *dev = (struct net_device *) sp->dev;
4892
4893         /* Initialize the H/W I/O registers */
4894         if (init_nic(sp) != 0) {
4895                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4896                           dev->name);
4897                 return -ENODEV;
4898         }
4899
4900         /*
4901          * Initializing the Rx buffers. Buffers are replenished below for
4902          * each of the configured Rx rings.
4903          */
4904         mac_control = &sp->mac_control;
4905         config = &sp->config;
4906
4907         for (i = 0; i < config->rx_ring_num; i++) {
4908                 if ((ret = fill_rx_buffers(sp, i))) {
4909                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4910                                   dev->name);
4911                         s2io_reset(sp);
4912                         free_rx_buffers(sp);
4913                         return -ENOMEM;
4914                 }
4915                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4916                           atomic_read(&sp->rx_bufs_left[i]));
4917         }
4918
4919         /* Setting its receive mode */
4920         s2io_set_multicast(dev);
4921
4922         /* Enable tasklet for the device */
4923         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4924
4925         /* Enable Rx Traffic and interrupts on the NIC */
4926         if (start_nic(sp)) {
4927                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4928                 tasklet_kill(&sp->task);
4929                 s2io_reset(sp);
4930                 free_irq(dev->irq, dev);
4931                 free_rx_buffers(sp);
4932                 return -ENODEV;
4933         }
4934
4935         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4936
4937         atomic_set(&sp->card_state, CARD_UP);
4938         return 0;
4939 }
4940
4941 /**
4942  * s2io_restart_nic - Resets the NIC.
4943  * @data : long pointer to the device private structure
4944  * Description:
4945  * This function is scheduled to be run by the s2io_tx_watchdog
4946  * function after 0.5 secs to reset the NIC. The idea is to reduce
4947  * the run time of the watch dog routine which is run holding a
4948  * spin lock.
4949  */
4950
4951 static void s2io_restart_nic(unsigned long data)
4952 {
4953         struct net_device *dev = (struct net_device *) data;
4954         nic_t *sp = dev->priv;
4955
4956         s2io_card_down(sp);
4957         if (s2io_card_up(sp)) {
4958                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4959                           dev->name);
4960         }
4961         netif_wake_queue(dev);
4962         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4963                   dev->name);
4964
4965 }
4966
4967 /**
4968  *  s2io_tx_watchdog - Watchdog for transmit side.
4969  *  @dev : Pointer to net device structure
4970  *  Description:
4971  *  This function is triggered if the Tx Queue is stopped
4972  *  for a pre-defined amount of time when the Interface is still up.
4973  *  If the Interface is jammed in such a situation, the hardware is
4974  *  reset (by s2io_close) and restarted again (by s2io_open) to
4975  *  overcome any problem that might have been caused in the hardware.
4976  *  Return value:
4977  *  void
4978  */
4979
4980 static void s2io_tx_watchdog(struct net_device *dev)
4981 {
4982         nic_t *sp = dev->priv;
4983
4984         if (netif_carrier_ok(dev)) {
4985                 schedule_work(&sp->rst_timer_task);
4986         }
4987 }
4988
4989 /**
4990  *   rx_osm_handler - To perform some OS related operations on SKB.
4991  *   @ring_data : per-ring control block, which also points back to the
4992  *   s2io_nic structure.
4993  *   @rxdp : pointer to the Rx descriptor that was just processed. The
4994  *   packet length, checksum status and originating ring are all derived
4995  *   from these two arguments.
4996  *   Description:
4997  *   This function is called by the Rx interrupt service routine to perform
4998  *   some OS related operations on the SKB before passing it to the upper
4999  *   layers. It mainly checks if the checksum is OK, if so adds it to the
5000  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
5001  *   to the upper layer. If the checksum is wrong, it increments the Rx
5002  *   packet error count, frees the SKB and returns error.
5003  *   Return value:
5004  *   SUCCESS on success and -1 on failure.
5005  */
5006 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5007 {
5008         nic_t *sp = ring_data->nic;
5009         struct net_device *dev = (struct net_device *) sp->dev;
5010         struct sk_buff *skb = (struct sk_buff *)
5011                 ((unsigned long) rxdp->Host_Control);
5012         int ring_no = ring_data->ring_no;
5013         u16 l3_csum, l4_csum;
5014 #ifdef CONFIG_2BUFF_MODE
5015         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5016         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5017         int get_block = ring_data->rx_curr_get_info.block_index;
5018         int get_off = ring_data->rx_curr_get_info.offset;
5019         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5020         unsigned char *buff;
5021 #else
5022         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
5023 #endif
5024         skb->dev = dev;
5025         if (rxdp->Control_1 & RXD_T_CODE) {
5026                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5027                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5028                           dev->name, err);
5029                 dev_kfree_skb(skb);
5030                 sp->stats.rx_crc_errors++;
5031                 atomic_dec(&sp->rx_bufs_left[ring_no]);
5032                 rxdp->Host_Control = 0;
5033                 return 0;
5034         }
5035
5036         /* Updating statistics */
5037         rxdp->Host_Control = 0;
5038         sp->rx_pkt_count++;
5039         sp->stats.rx_packets++;
5040 #ifndef CONFIG_2BUFF_MODE
5041         sp->stats.rx_bytes += len;
5042 #else
5043         sp->stats.rx_bytes += buf0_len + buf2_len;
5044 #endif
5045
5046 #ifndef CONFIG_2BUFF_MODE
5047         skb_put(skb, len);
5048 #else
5049         buff = skb_push(skb, buf0_len);
5050         memcpy(buff, ba->ba_0, buf0_len);
5051         skb_put(skb, buf2_len);
5052 #endif
5053
5054         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5055             (sp->rx_csum)) {
5056                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5057                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5058                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5059                         /*
5060                          * NIC verifies if the Checksum of the received
5061                          * frame is Ok or not and accordingly returns
5062                          * a flag in the RxD.
5063                          */
5064                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5065                 } else {
5066                         /*
5067                          * Packet with erroneous checksum, let the
5068                          * upper layers deal with it.
5069                          */
5070                         skb->ip_summed = CHECKSUM_NONE;
5071                 }
5072         } else {
5073                 skb->ip_summed = CHECKSUM_NONE;
5074         }
5075
5076         skb->protocol = eth_type_trans(skb, dev);
5077 #ifdef CONFIG_S2IO_NAPI
5078         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5079                 /* Queueing the vlan frame to the upper layer */
5080                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5081                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5082         } else {
5083                 netif_receive_skb(skb);
5084         }
5085 #else
5086         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5087                 /* Queueing the vlan frame to the upper layer */
5088                 vlan_hwaccel_rx(skb, sp->vlgrp,
5089                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5090         } else {
5091                 netif_rx(skb);
5092         }
5093 #endif
5094         dev->last_rx = jiffies;
5095         atomic_dec(&sp->rx_bufs_left[ring_no]);
5096         return SUCCESS;
5097 }
5098
5099 /**
5100  *  s2io_link - stops/starts the Tx queue.
5101  *  @sp : private member of the device structure, which is a pointer to the
5102  *  s2io_nic structure.
5103  *  @link : indicates whether link is UP/DOWN.
5104  *  Description:
5105  *  This function stops/starts the Tx queue depending on whether the link
5106  *  status of the NIC is down or up. This is called by the Alarm
5107  *  interrupt handler whenever a link change interrupt comes up.
5108  *  Return value:
5109  *  void.
5110  */
5111
5112 void s2io_link(nic_t * sp, int link)
5113 {
5114         struct net_device *dev = (struct net_device *) sp->dev;
5115
5116         if (link != sp->last_link_state) {
5117                 if (link == LINK_DOWN) {
5118                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5119                         netif_carrier_off(dev);
5120                 } else {
5121                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5122                         netif_carrier_on(dev);
5123                 }
5124         }
5125         sp->last_link_state = link;
5126 }
5127
5128 /**
5129  *  get_xena_rev_id - to identify revision ID of xena.
5130  *  @pdev : PCI Dev structure
5131  *  Description:
5132  *  Function to identify the Revision ID of xena.
5133  *  Return value:
5134  *  returns the revision ID of the device.
5135  */
5136
5137 int get_xena_rev_id(struct pci_dev *pdev)
5138 {
5139         u8 id = 0;
5140         int ret;
5141         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5142         return id;
5143 }
5144
5145 /**
5146  *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
5147  *  @sp : private member of the device structure, which is a pointer to the
5148  *  s2io_nic structure.
5149  *  Description:
5150  *  This function initializes a few of the PCI and PCI-X configuration registers
5151  *  with recommended values.
5152  *  Return value:
5153  *  void
5154  */
5155
5156 static void s2io_init_pci(nic_t * sp)
5157 {
5158         u16 pci_cmd = 0, pcix_cmd = 0;
5159
5160         /* Enable Data Parity Error Recovery in PCI-X command register. */
5161         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5162                              &(pcix_cmd));
5163         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5164                               (pcix_cmd | 1));
5165         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5166                              &(pcix_cmd));
5167
5168         /* Set the PErr Response bit in PCI command register. */
5169         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5170         pci_write_config_word(sp->pdev, PCI_COMMAND,
5171                               (pci_cmd | PCI_COMMAND_PARITY));
5172         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5173
5174         /* Forcibly disabling relaxed ordering capability of the card. */
5175         pcix_cmd &= 0xfffd;
5176         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5177                               pcix_cmd);
5178         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5179                              &(pcix_cmd));
5180 }
5181
5182 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5183 MODULE_LICENSE("GPL");
5184 module_param(tx_fifo_num, int, 0);
5185 module_param(rx_ring_num, int, 0);
5186 module_param_array(tx_fifo_len, uint, NULL, 0);
5187 module_param_array(rx_ring_sz, uint, NULL, 0);
5188 module_param_array(rts_frm_len, uint, NULL, 0);
5189 module_param(use_continuous_tx_intrs, int, 1);
5190 module_param(rmac_pause_time, int, 0);
5191 module_param(mc_pause_threshold_q0q3, int, 0);
5192 module_param(mc_pause_threshold_q4q7, int, 0);
5193 module_param(shared_splits, int, 0);
5194 module_param(tmac_util_period, int, 0);
5195 module_param(rmac_util_period, int, 0);
5196 module_param(bimodal, bool, 0);
5197 #ifndef CONFIG_S2IO_NAPI
5198 module_param(indicate_max_pkts, int, 0);
5199 #endif
5200 module_param(rxsync_frequency, int, 0);
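
/*
 * Usage sketch (not part of the driver): the parameters above are the
 * usual module-load knobs, e.g.
 *
 *     modprobe s2io tx_fifo_num=2 rx_ring_num=2 rx_ring_sz=128,128
 *
 * Parameters left unset fall back to the defaults filled in by
 * s2io_init_nic() (see the Tx/Rx side parameter setup below).
 */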
5201
5202 /**
5203  *  s2io_init_nic - Initialization of the adapter.
5204  *  @pdev : structure containing the PCI related information of the device.
5205  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5206  *  Description:
5207  *  The function initializes an adapter identified by the pci_dev structure.
5208  *  All OS related initialization including memory and device structure and
5209  *  initialization of the device private variable is done. Also the swapper
5210  *  control register is initialized to enable read and write into the I/O
5211  *  registers of the device.
5212  *  Return value:
5213  *  returns 0 on success and negative on failure.
5214  */
5215
5216 static int __devinit
5217 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5218 {
5219         nic_t *sp;
5220         struct net_device *dev;
5221         int i, j, ret;
5222         int dma_flag = FALSE;
5223         u32 mac_up, mac_down;
5224         u64 val64 = 0, tmp64 = 0;
5225         XENA_dev_config_t __iomem *bar0 = NULL;
5226         u16 subid;
5227         mac_info_t *mac_control;
5228         struct config_param *config;
5229         int mode;
5230
5231 #ifdef CONFIG_S2IO_NAPI
5232         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5233 #endif
5234
5235         if ((ret = pci_enable_device(pdev))) {
5236                 DBG_PRINT(ERR_DBG,
5237                           "s2io_init_nic: pci_enable_device failed\n");
5238                 return ret;
5239         }
5240
5241         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5242                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5243                 dma_flag = TRUE;
5244                 if (pci_set_consistent_dma_mask
5245                     (pdev, DMA_64BIT_MASK)) {
5246                         DBG_PRINT(ERR_DBG,
5247                                   "Unable to obtain 64bit DMA for "
5248                                   "consistent allocations\n");
5249                         pci_disable_device(pdev);
5250                         return -ENOMEM;
5251                 }
5252         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5253                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5254         } else {
5255                 pci_disable_device(pdev);
5256                 return -ENOMEM;
5257         }
5258
5259         if (pci_request_regions(pdev, s2io_driver_name)) {
5260                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5261                 pci_disable_device(pdev);
5262                 return -ENODEV;
5263         }
5264
5265         dev = alloc_etherdev(sizeof(nic_t));
5266         if (dev == NULL) {
5267                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5268                 pci_disable_device(pdev);
5269                 pci_release_regions(pdev);
5270                 return -ENODEV;
5271         }
5272
5273         pci_set_master(pdev);
5274         pci_set_drvdata(pdev, dev);
5275         SET_MODULE_OWNER(dev);
5276         SET_NETDEV_DEV(dev, &pdev->dev);
5277
5278         /*  Private member variable initialized to s2io NIC structure */
5279         sp = dev->priv;
5280         memset(sp, 0, sizeof(nic_t));
5281         sp->dev = dev;
5282         sp->pdev = pdev;
5283         sp->high_dma_flag = dma_flag;
5284         sp->device_enabled_once = FALSE;
5285
5286         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5287                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5288                 sp->device_type = XFRAME_II_DEVICE;
5289         else
5290                 sp->device_type = XFRAME_I_DEVICE;
5291
5292         /* Initialize some PCI/PCI-X fields of the NIC. */
5293         s2io_init_pci(sp);
5294
5295         /*
5296          * Setting the device configuration parameters.
5297          * Most of these parameters can be specified by the user during
5298          * module insertion as they are module loadable parameters. If
5299          * these parameters are not specified during load time, they
5300          * are initialized with default values.
5301          */
5302         mac_control = &sp->mac_control;
5303         config = &sp->config;
5304
5305         /* Tx side parameters. */
5306         if (tx_fifo_len[0] == 0)
5307                 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5308         config->tx_fifo_num = tx_fifo_num;
5309         for (i = 0; i < MAX_TX_FIFOS; i++) {
5310                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5311                 config->tx_cfg[i].fifo_priority = i;
5312         }
5313
5314         /* mapping the QoS priority to the configured fifos */
5315         for (i = 0; i < MAX_TX_FIFOS; i++)
5316                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5317
5318         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
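             /*
              * Use utilization based Tx interrupts by default; the loop below
              * falls back to per-list Tx interrupts if any FIFO has fewer
              * than 65 descriptors.
              */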
5319         for (i = 0; i < config->tx_fifo_num; i++) {
5320                 config->tx_cfg[i].f_no_snoop =
5321                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5322                 if (config->tx_cfg[i].fifo_len < 65) {
5323                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5324                         break;
5325                 }
5326         }
5327         config->max_txds = MAX_SKB_FRAGS;
5328
5329         /* Rx side parameters. */
5330         if (rx_ring_sz[0] == 0)
5331                 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5332         config->rx_ring_num = rx_ring_num;
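             /*
              * rx_ring_sz[] is given in blocks; each block corresponds to
              * (MAX_RXDS_PER_BLOCK + 1) receive descriptors, so num_rxd below
              * is the total descriptor count for the ring.
              */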
5333         for (i = 0; i < MAX_RX_RINGS; i++) {
5334                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5335                     (MAX_RXDS_PER_BLOCK + 1);
5336                 config->rx_cfg[i].ring_priority = i;
5337         }
5338
5339         for (i = 0; i < rx_ring_num; i++) {
5340                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5341                 config->rx_cfg[i].f_no_snoop =
5342                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5343         }
5344
5345         /*  Setting Mac Control parameters */
5346         mac_control->rmac_pause_time = rmac_pause_time;
5347         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5348         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5349
5350
5351         /* Initialize Ring buffer parameters. */
5352         for (i = 0; i < config->rx_ring_num; i++)
5353                 atomic_set(&sp->rx_bufs_left[i], 0);
5354
5355         /* Initialize the number of ISRs currently running */
5356         atomic_set(&sp->isr_cnt, 0);
5357
5358         /*  initialize the shared memory used by the NIC and the host */
5359         if (init_shared_mem(sp)) {
5360                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5361                           __FUNCTION__);
5362                 ret = -ENOMEM;
5363                 goto mem_alloc_failed;
5364         }
5365
5366         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5367                                      pci_resource_len(pdev, 0));
5368         if (!sp->bar0) {
5369                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5370                           dev->name);
5371                 ret = -ENOMEM;
5372                 goto bar0_remap_failed;
5373         }
5374
5375         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5376                                      pci_resource_len(pdev, 2));
5377         if (!sp->bar1) {
5378                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5379                           dev->name);
5380                 ret = -ENOMEM;
5381                 goto bar1_remap_failed;
5382         }
5383
5384         dev->irq = pdev->irq;
5385         dev->base_addr = (unsigned long) sp->bar0;
5386
5387         /* Initialize the per-FIFO BAR1 addresses; each Tx FIFO's register region is 0x20000 bytes apart. */
5388         for (j = 0; j < MAX_TX_FIFOS; j++) {
5389                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5390                     (sp->bar1 + (j * 0x00020000));
5391         }
5392
5393         /*  Driver entry points */
5394         dev->open = &s2io_open;
5395         dev->stop = &s2io_close;
5396         dev->hard_start_xmit = &s2io_xmit;
5397         dev->get_stats = &s2io_get_stats;
5398         dev->set_multicast_list = &s2io_set_multicast;
5399         dev->do_ioctl = &s2io_ioctl;
5400         dev->change_mtu = &s2io_change_mtu;
5401         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5402         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5403         dev->vlan_rx_register = s2io_vlan_rx_register;
5404         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5405
5406         /*
5407          * We will use eth_mac_addr() for dev->set_mac_address;
5408          * the MAC address is set every time dev->open() is called.
5409          */
5410 #if defined(CONFIG_S2IO_NAPI)
5411         dev->poll = s2io_poll;
5412         dev->weight = 32;
5413 #endif
5414
5415         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5416         if (sp->high_dma_flag == TRUE)
5417                 dev->features |= NETIF_F_HIGHDMA;
5418 #ifdef NETIF_F_TSO
5419         dev->features |= NETIF_F_TSO;
5420 #endif
5421
5422         dev->tx_timeout = &s2io_tx_watchdog;
5423         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
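             /*
              * Deferred work: s2io_restart_nic re-initializes the card (for
              * example after a Tx watchdog timeout) and s2io_set_link updates
              * the link state outside of interrupt context.
              */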
5424         INIT_WORK(&sp->rst_timer_task,
5425                   (void (*)(void *)) s2io_restart_nic, dev);
5426         INIT_WORK(&sp->set_link_task,
5427                   (void (*)(void *)) s2io_set_link, sp);
5428
5429         if (!(sp->device_type & XFRAME_II_DEVICE)) {
5430                 pci_save_state(sp->pdev);
5431         }
5432
5433         /* Setting swapper control on the NIC, for proper reset operation */
5434         if (s2io_set_swapper(sp)) {
5435                 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
5436                           dev->name);
5437                 ret = -EAGAIN;
5438                 goto set_swap_failed;
5439         }
5440
5441         /* Verify whether the Herc (Xframe II) works in the slot it is placed into */
5442         if (sp->device_type & XFRAME_II_DEVICE) {
5443                 mode = s2io_verify_pci_mode(sp);
5444                 if (mode < 0) {
5445                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5446                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5447                         ret = -EBADSLT;
5448                         goto set_swap_failed;
5449                 }
5450         }
5451
5452         /* Not needed for Herc */
5453         if (sp->device_type & XFRAME_I_DEVICE) {
5454                 /*
5455                  * Fix for all "FFs" MAC address problems observed on
5456                  * Alpha platforms
5457                  */
5458                 fix_mac_address(sp);
5459                 s2io_reset(sp);
5460         }
5461
5462         /*
5463          * MAC address initialization.
5464          * For now only one MAC address will be read and used.
5465          */
5466         bar0 = sp->bar0;
5467         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5468             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5469         writeq(val64, &bar0->rmac_addr_cmd_mem);
5470         wait_for_cmd_complete(sp);
5471
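             /*
              * The 48-bit station address occupies the upper six octets of the
              * 64-bit rmac_addr_data0_mem value, most significant octet first;
              * split it into the def_mac_addr byte array below.
              */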
5472         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5473         mac_down = (u32) tmp64;
5474         mac_up = (u32) (tmp64 >> 32);
5475
5476         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN); /* ETH_ALEN bytes, not sizeof(ETH_ALEN) */
5477
5478         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5479         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5480         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5481         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5482         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5483         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5484
5485         /*  Set the factory defined MAC address initially   */
5486         dev->addr_len = ETH_ALEN;
5487         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5488
5489         /*
5490          * Initialize the tasklet status and link state flags
5491          * and the card state parameter
5492          */
5493         atomic_set(&(sp->card_state), 0);
5494         sp->tasklet_status = 0;
5495         sp->link_state = 0;
5496
5497         /* Initialize spinlocks */
5498         spin_lock_init(&sp->tx_lock);
5499 #ifndef CONFIG_S2IO_NAPI
5500         spin_lock_init(&sp->put_lock);
5501 #endif
5502         spin_lock_init(&sp->rx_lock);
5503
5504         /*
5505          * SXE-002: Configure link and activity LED to init state
5506          * on driver load.
5507          */
5508         subid = sp->pdev->subsystem_device;
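             /*
              * Subsystem ids whose low byte is 0x07 or higher appear to be the
              * boards that need this LED initialization workaround.
              */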
5509         if ((subid & 0xFF) >= 0x07) {
5510                 val64 = readq(&bar0->gpio_control);
5511                 val64 |= 0x0000800000000000ULL;
5512                 writeq(val64, &bar0->gpio_control);
5513                 val64 = 0x0411040400000000ULL;
5514                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5515                 val64 = readq(&bar0->gpio_control);
5516         }
5517
5518         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5519
5520         if (register_netdev(dev)) {
5521                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5522                 ret = -ENODEV;
5523                 goto register_failed;
5524         }
5525
5526         if (sp->device_type & XFRAME_II_DEVICE) {
5527                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5528                           dev->name);
5529                 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5530                                 get_xena_rev_id(sp->pdev),
5531                                 s2io_driver_version);
5532                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5533                           sp->def_mac_addr[0].mac_addr[0],
5534                           sp->def_mac_addr[0].mac_addr[1],
5535                           sp->def_mac_addr[0].mac_addr[2],
5536                           sp->def_mac_addr[0].mac_addr[3],
5537                           sp->def_mac_addr[0].mac_addr[4],
5538                           sp->def_mac_addr[0].mac_addr[5]);
5539                 mode = s2io_print_pci_mode(sp);
5540                 if (mode < 0) {
5541                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5542                         ret = -EBADSLT;
                             unregister_netdev(dev);
5543                         goto set_swap_failed;
5544                 }
5545         } else {
5546                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5547                           dev->name);
5548                 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5549                                         get_xena_rev_id(sp->pdev),
5550                                         s2io_driver_version);
5551                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5552                           sp->def_mac_addr[0].mac_addr[0],
5553                           sp->def_mac_addr[0].mac_addr[1],
5554                           sp->def_mac_addr[0].mac_addr[2],
5555                           sp->def_mac_addr[0].mac_addr[3],
5556                           sp->def_mac_addr[0].mac_addr[4],
5557                           sp->def_mac_addr[0].mac_addr[5]);
5558         }
5559
5560         /* Initialize device name */
5561         strcpy(sp->name, dev->name);
5562         if (sp->device_type & XFRAME_II_DEVICE)
5563                 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5564         else
5565                 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5566
5567         /* Initialize bimodal Interrupts */
5568         sp->config.bimodal = bimodal;
5569         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5570                 sp->config.bimodal = 0;
5571                 DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
5572                         dev->name);
5573         }
5574
5575         /*
5576          * Set the link state to off at this point; when the link change
5577          * interrupt arrives, the state will be updated automatically to
5578          * the right state.
5579          */
5580         netif_carrier_off(dev);
5581
5582         return 0;
5583
5584       register_failed:
5585       set_swap_failed:
5586         iounmap(sp->bar1);
5587       bar1_remap_failed:
5588         iounmap(sp->bar0);
5589       bar0_remap_failed:
5590       mem_alloc_failed:
5591         free_shared_mem(sp);
5592         pci_disable_device(pdev);
5593         pci_release_regions(pdev);
5594         pci_set_drvdata(pdev, NULL);
5595         free_netdev(dev);
5596
5597         return ret;
5598 }
5599
5600 /**
5601  * s2io_rem_nic - Free the PCI device
5602  * @pdev: structure containing the PCI related information of the device.
5603  * Description: This function is called by the PCI subsystem to release a
5604  * PCI device and free up all resources held by the device. This could
5605  * be in response to a hot-plug event or when the driver is to be removed
5606  * from memory.
5607  */
5608
5609 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5610 {
5611         struct net_device *dev =
5612             (struct net_device *) pci_get_drvdata(pdev);
5613         nic_t *sp;
5614
5615         if (dev == NULL) {
5616                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5617                 return;
5618         }
5619
5620         sp = dev->priv;
5621         unregister_netdev(dev);
5622
5623         free_shared_mem(sp);
5624         iounmap(sp->bar0);
5625         iounmap(sp->bar1);
5626         pci_disable_device(pdev);
5627         pci_release_regions(pdev);
5628         pci_set_drvdata(pdev, NULL);
5629         free_netdev(dev);
5630 }
5631
5632 /**
5633  * s2io_starter - Entry point for the driver
5634  * Description: This function is the entry point for the driver. It verifies
5635  * the module loadable parameters and initializes PCI configuration space.
5636  */
5637
5638 int __init s2io_starter(void)
5639 {
5640         return pci_module_init(&s2io_driver);
5641 }
5642
5643 /**
5644  * s2io_closer - Cleanup routine for the driver
5645  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5646  */
5647
5648 void s2io_closer(void)
5649 {
5650         pci_unregister_driver(&s2io_driver);
5651         DBG_PRINT(INIT_DBG, "cleanup done\n");
5652 }
5653
5654 module_init(s2io_starter);
5655 module_exit(s2io_closer);