[PATCH] S2io: Timer based slowpath handling
drivers/net/s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21 * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated and also for styling-related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables:
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_len: This defines the number of descriptors each ring can have. This
32  * is an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with the corresponding FIFO.
36  ************************************************************************/
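/*
 * Illustrative load-time example (hypothetical values; parameter names as
 * documented above):
 *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=1
 */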
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
61 #include <asm/io.h>
62
63 /* local include */
64 #include "s2io.h"
65 #include "s2io-regs.h"
66
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
70
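/*
 * Returns non-zero when the RxD is no longer owned by the adapter
 * (RXD_OWN_XENA cleared) and its marker field no longer carries the
 * driver's THE_RXD_MARK pattern, i.e. the descriptor appears to hold a
 * completed receive.
 */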
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
72 {
73         int ret;
74
75         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
77
78         return ret;
79 }
80
81 /*
82  * Cards with the following subsystem_ids have a link state indication
83  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
84  * The macro below identifies these cards given the subsystem_id.
85  */
86 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
87                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
88                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
89
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
93 #define PANIC   1
94 #define LOW     2
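/*
 * Reports how depleted a ring's receive buffers are: LOW once the number of
 * available Rx buffers falls more than 16 below the ring's pkt_cnt,
 * escalating to PANIC when no more than one block's worth of RxDs
 * (MAX_RXDS_PER_BLOCK) is left.
 */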
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
96 {
97         int level = 0;
98         mac_info_t *mac_control;
99
100         mac_control = &sp->mac_control;
101         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
102                 level = LOW;
103                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
104                         level = PANIC;
105                 }
106         }
107
108         return level;
109 }
110
111 /* Ethtool related variables and Macros. */
112 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
113         "Register test\t(offline)",
114         "Eeprom test\t(offline)",
115         "Link test\t(online)",
116         "RLDRAM test\t(offline)",
117         "BIST Test\t(offline)"
118 };
119
120 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
121         {"tmac_frms"},
122         {"tmac_data_octets"},
123         {"tmac_drop_frms"},
124         {"tmac_mcst_frms"},
125         {"tmac_bcst_frms"},
126         {"tmac_pause_ctrl_frms"},
127         {"tmac_any_err_frms"},
128         {"tmac_vld_ip_octets"},
129         {"tmac_vld_ip"},
130         {"tmac_drop_ip"},
131         {"tmac_icmp"},
132         {"tmac_rst_tcp"},
133         {"tmac_tcp"},
134         {"tmac_udp"},
135         {"rmac_vld_frms"},
136         {"rmac_data_octets"},
137         {"rmac_fcs_err_frms"},
138         {"rmac_drop_frms"},
139         {"rmac_vld_mcst_frms"},
140         {"rmac_vld_bcst_frms"},
141         {"rmac_in_rng_len_err_frms"},
142         {"rmac_long_frms"},
143         {"rmac_pause_ctrl_frms"},
144         {"rmac_discarded_frms"},
145         {"rmac_usized_frms"},
146         {"rmac_osized_frms"},
147         {"rmac_frag_frms"},
148         {"rmac_jabber_frms"},
149         {"rmac_ip"},
150         {"rmac_ip_octets"},
151         {"rmac_hdr_err_ip"},
152         {"rmac_drop_ip"},
153         {"rmac_icmp"},
154         {"rmac_tcp"},
155         {"rmac_udp"},
156         {"rmac_err_drp_udp"},
157         {"rmac_pause_cnt"},
158         {"rmac_accepted_ip"},
159         {"rmac_err_tcp"},
160         {"\n DRIVER STATISTICS"},
161         {"single_bit_ecc_errs"},
162         {"double_bit_ecc_errs"},
163 };
164
165 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
166 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
167
168 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
169 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
170
171 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
172                         init_timer(&timer);                     \
173                         timer.function = handle;                \
174                         timer.data = (unsigned long) arg;       \
175                         mod_timer(&timer, (jiffies + exp))      \
176
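/*
 * Illustrative use of the macro above for the timer based slowpath
 * handling (names are indicative only): arm a half-second alarm timer on
 * the device private structure "sp" with the handler "s2io_alarm_handle":
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */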
177 /*
178  * Constants to be programmed into the Xena's registers, to configure
179  * the XAUI.
180  */
181
182 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
183 #define END_SIGN        0x0
184
185 static u64 default_mdio_cfg[] = {
186         /* Reset PMA PLL */
187         0xC001010000000000ULL, 0xC0010100000000E0ULL,
188         0xC0010100008000E4ULL,
189         /* Remove Reset from PMA PLL */
190         0xC001010000000000ULL, 0xC0010100000000E0ULL,
191         0xC0010100000000E4ULL,
192         END_SIGN
193 };
194
195 static u64 default_dtx_cfg[] = {
196         0x8000051500000000ULL, 0x80000515000000E0ULL,
197         0x80000515D93500E4ULL, 0x8001051500000000ULL,
198         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
199         0x8002051500000000ULL, 0x80020515000000E0ULL,
200         0x80020515F21000E4ULL,
201         /* Set PADLOOPBACKN */
202         0x8002051500000000ULL, 0x80020515000000E0ULL,
203         0x80020515B20000E4ULL, 0x8003051500000000ULL,
204         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
205         0x8004051500000000ULL, 0x80040515000000E0ULL,
206         0x80040515B20000E4ULL, 0x8005051500000000ULL,
207         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
208         SWITCH_SIGN,
209         /* Remove PADLOOPBACKN */
210         0x8002051500000000ULL, 0x80020515000000E0ULL,
211         0x80020515F20000E4ULL, 0x8003051500000000ULL,
212         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
213         0x8004051500000000ULL, 0x80040515000000E0ULL,
214         0x80040515F20000E4ULL, 0x8005051500000000ULL,
215         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
216         END_SIGN
217 };
218
219 /*
220  * Constants for Fixing the MacAddress problem seen mostly on
221  * Alpha machines.
222  */
223 static u64 fix_mac[] = {
224         0x0060000000000000ULL, 0x0060600000000000ULL,
225         0x0040600000000000ULL, 0x0000600000000000ULL,
226         0x0020600000000000ULL, 0x0060600000000000ULL,
227         0x0020600000000000ULL, 0x0060600000000000ULL,
228         0x0020600000000000ULL, 0x0060600000000000ULL,
229         0x0020600000000000ULL, 0x0060600000000000ULL,
230         0x0020600000000000ULL, 0x0060600000000000ULL,
231         0x0020600000000000ULL, 0x0060600000000000ULL,
232         0x0020600000000000ULL, 0x0060600000000000ULL,
233         0x0020600000000000ULL, 0x0060600000000000ULL,
234         0x0020600000000000ULL, 0x0060600000000000ULL,
235         0x0020600000000000ULL, 0x0060600000000000ULL,
236         0x0020600000000000ULL, 0x0000600000000000ULL,
237         0x0040600000000000ULL, 0x0060600000000000ULL,
238         END_SIGN
239 };
240
241 /* Module Loadable parameters. */
242 static unsigned int tx_fifo_num = 1;
243 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
244     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
245 static unsigned int rx_ring_num = 1;
246 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
247     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
248 static unsigned int rts_frm_len[MAX_RX_RINGS] =
249     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
250 static unsigned int use_continuous_tx_intrs = 1;
251 static unsigned int rmac_pause_time = 65535;
252 static unsigned int mc_pause_threshold_q0q3 = 187;
253 static unsigned int mc_pause_threshold_q4q7 = 187;
254 static unsigned int shared_splits;
255 static unsigned int tmac_util_period = 5;
256 static unsigned int rmac_util_period = 5;
257 #ifndef CONFIG_S2IO_NAPI
258 static unsigned int indicate_max_pkts;
259 #endif
260
261 /*
262  * S2IO device table.
263  * This table lists all the devices that this driver supports.
264  */
265 static struct pci_device_id s2io_tbl[] __devinitdata = {
266         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
267          PCI_ANY_ID, PCI_ANY_ID},
268         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
269          PCI_ANY_ID, PCI_ANY_ID},
270         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
271          PCI_ANY_ID, PCI_ANY_ID},
272         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
273          PCI_ANY_ID, PCI_ANY_ID},
274         {0,}
275 };
276
277 MODULE_DEVICE_TABLE(pci, s2io_tbl);
278
279 static struct pci_driver s2io_driver = {
280       .name = "S2IO",
281       .id_table = s2io_tbl,
282       .probe = s2io_init_nic,
283       .remove = __devexit_p(s2io_rem_nic),
284 };
285
286 /* A helper macro used by both the init and free shared_mem functions. */
287 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
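/*
 * For example, a 1000-entry FIFO with 16 TxD lists per page needs
 * TXD_MEM_PAGE_CNT(1000, 16) = (1000 + 16 - 1) / 16 = 63 pages,
 * i.e. a ceiling division.
 */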
288
289 /**
290  * init_shared_mem - Allocation and Initialization of Memory
291  * @nic: Device private variable.
292  * Description: The function allocates all the memory areas shared
293  * between the NIC and the driver. This includes Tx descriptors,
294  * Rx descriptors and the statistics block.
295  */
296
297 static int init_shared_mem(struct s2io_nic *nic)
298 {
299         u32 size;
300         void *tmp_v_addr, *tmp_v_addr_next;
301         dma_addr_t tmp_p_addr, tmp_p_addr_next;
302         RxD_block_t *pre_rxd_blk = NULL;
303         int i, j, blk_cnt, rx_sz, tx_sz;
304         int lst_size, lst_per_page;
305         struct net_device *dev = nic->dev;
306 #ifdef CONFIG_2BUFF_MODE
307         u64 tmp;
308         buffAdd_t *ba;
309 #endif
310
311         mac_info_t *mac_control;
312         struct config_param *config;
313
314         mac_control = &nic->mac_control;
315         config = &nic->config;
316
317
318         /* Allocation and initialization of TXDLs in FIFOs */
319         size = 0;
320         for (i = 0; i < config->tx_fifo_num; i++) {
321                 size += config->tx_cfg[i].fifo_len;
322         }
323         if (size > MAX_AVAILABLE_TXDS) {
324                 DBG_PRINT(ERR_DBG, "%s: Total number of Tx descriptors ",
325                           dev->name);
326                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
327                 DBG_PRINT(ERR_DBG, "that can be used\n");
328                 return FAILURE;
329         }
330
331         lst_size = (sizeof(TxD_t) * config->max_txds);
332         tx_sz = lst_size * size;
333         lst_per_page = PAGE_SIZE / lst_size;
334
335         for (i = 0; i < config->tx_fifo_num; i++) {
336                 int fifo_len = config->tx_cfg[i].fifo_len;
337                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
338                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
339                                                           GFP_KERNEL);
340                 if (!mac_control->fifos[i].list_info) {
341                         DBG_PRINT(ERR_DBG,
342                                   "Malloc failed for list_info\n");
343                         return -ENOMEM;
344                 }
345                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
346         }
347         for (i = 0; i < config->tx_fifo_num; i++) {
348                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
349                                                 lst_per_page);
350                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
351                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
352                     config->tx_cfg[i].fifo_len - 1;
353                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
354                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
355                     config->tx_cfg[i].fifo_len - 1;
356                 mac_control->fifos[i].fifo_no = i;
357                 mac_control->fifos[i].nic = nic;
358                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
359
360                 for (j = 0; j < page_num; j++) {
361                         int k = 0;
362                         dma_addr_t tmp_p;
363                         void *tmp_v;
364                         tmp_v = pci_alloc_consistent(nic->pdev,
365                                                      PAGE_SIZE, &tmp_p);
366                         if (!tmp_v) {
367                                 DBG_PRINT(ERR_DBG,
368                                           "pci_alloc_consistent ");
369                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
370                                 return -ENOMEM;
371                         }
372                         while (k < lst_per_page) {
373                                 int l = (j * lst_per_page) + k;
374                                 if (l == config->tx_cfg[i].fifo_len)
375                                         break;
376                                 mac_control->fifos[i].list_info[l].list_virt_addr =
377                                     tmp_v + (k * lst_size);
378                                 mac_control->fifos[i].list_info[l].list_phy_addr =
379                                     tmp_p + (k * lst_size);
380                                 k++;
381                         }
382                 }
383         }
384
385         /* Allocation and initialization of RXDs in Rings */
386         size = 0;
387         for (i = 0; i < config->rx_ring_num; i++) {
388                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
389                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
390                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
391                                   i);
392                         DBG_PRINT(ERR_DBG, "RxDs per Block");
393                         return FAILURE;
394                 }
395                 size += config->rx_cfg[i].num_rxd;
396                 mac_control->rings[i].block_count =
397                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
398                 mac_control->rings[i].pkt_cnt =
399                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
400         }
401         size = (size * (sizeof(RxD_t)));
402         rx_sz = size;
403
404         for (i = 0; i < config->rx_ring_num; i++) {
405                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
406                 mac_control->rings[i].rx_curr_get_info.offset = 0;
407                 mac_control->rings[i].rx_curr_get_info.ring_len =
408                     config->rx_cfg[i].num_rxd - 1;
409                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
410                 mac_control->rings[i].rx_curr_put_info.offset = 0;
411                 mac_control->rings[i].rx_curr_put_info.ring_len =
412                     config->rx_cfg[i].num_rxd - 1;
413                 mac_control->rings[i].nic = nic;
414                 mac_control->rings[i].ring_no = i;
415
416                 blk_cnt =
417                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
418                 /*  Allocating all the Rx blocks */
419                 for (j = 0; j < blk_cnt; j++) {
420 #ifndef CONFIG_2BUFF_MODE
421                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
422 #else
423                         size = SIZE_OF_BLOCK;
424 #endif
425                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
426                                                           &tmp_p_addr);
427                         if (tmp_v_addr == NULL) {
428                                 /*
429                                  * In case of failure, free_shared_mem()
430                                  * is called, which should free any
431                                  * memory that was allocated before the
432                                  * failure happened.
433                                  */
434                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
435                                     tmp_v_addr;
436                                 return -ENOMEM;
437                         }
438                         memset(tmp_v_addr, 0, size);
439                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
440                                 tmp_v_addr;
441                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
442                                 tmp_p_addr;
443                 }
444                 /* Interlinking all Rx Blocks */
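                /*
                 * Each block's trailing RxD area is stamped with END_OF_BLOCK
                 * and given the physical (and, in 1-buffer mode, the virtual)
                 * address of the next block; the (j + 1) % blk_cnt wrap makes
                 * the last block point back to the first, forming a ring.
                 */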
445                 for (j = 0; j < blk_cnt; j++) {
446                         tmp_v_addr =
447                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
448                         tmp_v_addr_next =
449                                 mac_control->rings[i].rx_blocks[(j + 1) %
450                                               blk_cnt].block_virt_addr;
451                         tmp_p_addr =
452                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
453                         tmp_p_addr_next =
454                                 mac_control->rings[i].rx_blocks[(j + 1) %
455                                               blk_cnt].block_dma_addr;
456
457                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
458                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
459                                                                  * marker.
460                                                                  */
461 #ifndef CONFIG_2BUFF_MODE
462                         pre_rxd_blk->reserved_2_pNext_RxD_block =
463                             (unsigned long) tmp_v_addr_next;
464 #endif
465                         pre_rxd_blk->pNext_RxD_Blk_physical =
466                             (u64) tmp_p_addr_next;
467                 }
468         }
469
470 #ifdef CONFIG_2BUFF_MODE
471         /*
472          * Allocation of Storages for buffer addresses in 2BUFF mode
473          * and the buffers as well.
474          */
475         for (i = 0; i < config->rx_ring_num; i++) {
476                 blk_cnt =
477                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
478                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
479                                      GFP_KERNEL);
480                 if (!mac_control->rings[i].ba)
481                         return -ENOMEM;
482                 for (j = 0; j < blk_cnt; j++) {
483                         int k = 0;
484                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
485                                                  (MAX_RXDS_PER_BLOCK + 1)),
486                                                 GFP_KERNEL);
487                         if (!mac_control->rings[i].ba[j])
488                                 return -ENOMEM;
489                         while (k != MAX_RXDS_PER_BLOCK) {
490                                 ba = &mac_control->rings[i].ba[j][k];
491
492                                 ba->ba_0_org = (void *) kmalloc
493                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
494                                 if (!ba->ba_0_org)
495                                         return -ENOMEM;
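                                /*
                                 * Round the buffer address up to the next
                                 * (ALIGN_SIZE + 1)-byte boundary; ALIGN_SIZE
                                 * is assumed to be a power-of-two minus one,
                                 * so the mask below clears the low-order bits.
                                 */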
496                                 tmp = (u64) ba->ba_0_org;
497                                 tmp += ALIGN_SIZE;
498                                 tmp &= ~((u64) ALIGN_SIZE);
499                                 ba->ba_0 = (void *) tmp;
500
501                                 ba->ba_1_org = (void *) kmalloc
502                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
503                                 if (!ba->ba_1_org)
504                                         return -ENOMEM;
505                                 tmp = (u64) ba->ba_1_org;
506                                 tmp += ALIGN_SIZE;
507                                 tmp &= ~((u64) ALIGN_SIZE);
508                                 ba->ba_1 = (void *) tmp;
509                                 k++;
510                         }
511                 }
512         }
513 #endif
514
515         /* Allocation and initialization of Statistics block */
516         size = sizeof(StatInfo_t);
517         mac_control->stats_mem = pci_alloc_consistent
518             (nic->pdev, size, &mac_control->stats_mem_phy);
519
520         if (!mac_control->stats_mem) {
521                 /*
522                  * In case of failure, free_shared_mem() is called, which
523                  * should free any memory that was allocated before the
524                  * failure happened.
525                  */
526                 return -ENOMEM;
527         }
528         mac_control->stats_mem_sz = size;
529
530         tmp_v_addr = mac_control->stats_mem;
531         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
532         memset(tmp_v_addr, 0, size);
533         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
534                   (unsigned long long) tmp_p_addr);
535
536         return SUCCESS;
537 }
538
539 /**
540  * free_shared_mem - Free the allocated Memory
541  * @nic:  Device private variable.
542  * Description: This function frees all the memory allocated by the
543  * init_shared_mem() function and returns it to the kernel.
544  */
545
546 static void free_shared_mem(struct s2io_nic *nic)
547 {
548         int i, j, blk_cnt, size;
549         void *tmp_v_addr;
550         dma_addr_t tmp_p_addr;
551         mac_info_t *mac_control;
552         struct config_param *config;
553         int lst_size, lst_per_page;
554
555
556         if (!nic)
557                 return;
558
559         mac_control = &nic->mac_control;
560         config = &nic->config;
561
562         lst_size = (sizeof(TxD_t) * config->max_txds);
563         lst_per_page = PAGE_SIZE / lst_size;
564
565         for (i = 0; i < config->tx_fifo_num; i++) {
566                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
567                                                 lst_per_page);
568                 for (j = 0; j < page_num; j++) {
569                         int mem_blks = (j * lst_per_page);
570                         if (!mac_control->fifos[i].list_info[mem_blks].
571                             list_virt_addr)
572                                 break;
573                         pci_free_consistent(nic->pdev, PAGE_SIZE,
574                                             mac_control->fifos[i].
575                                             list_info[mem_blks].
576                                             list_virt_addr,
577                                             mac_control->fifos[i].
578                                             list_info[mem_blks].
579                                             list_phy_addr);
580                 }
581                 kfree(mac_control->fifos[i].list_info);
582         }
583
584 #ifndef CONFIG_2BUFF_MODE
585         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
586 #else
587         size = SIZE_OF_BLOCK;
588 #endif
589         for (i = 0; i < config->rx_ring_num; i++) {
590                 blk_cnt = mac_control->rings[i].block_count;
591                 for (j = 0; j < blk_cnt; j++) {
592                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
593                                 block_virt_addr;
594                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
595                                 block_dma_addr;
596                         if (tmp_v_addr == NULL)
597                                 break;
598                         pci_free_consistent(nic->pdev, size,
599                                             tmp_v_addr, tmp_p_addr);
600                 }
601         }
602
603 #ifdef CONFIG_2BUFF_MODE
604         /* Freeing buffer storage addresses in 2BUFF mode. */
605         for (i = 0; i < config->rx_ring_num; i++) {
606                 blk_cnt =
607                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
608                 for (j = 0; j < blk_cnt; j++) {
609                         int k = 0;
610                         if (!mac_control->rings[i].ba[j])
611                                 continue;
612                         while (k != MAX_RXDS_PER_BLOCK) {
613                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
614                                 kfree(ba->ba_0_org);
615                                 kfree(ba->ba_1_org);
616                                 k++;
617                         }
618                         kfree(mac_control->rings[i].ba[j]);
619                 }
620                 if (mac_control->rings[i].ba)
621                         kfree(mac_control->rings[i].ba);
622         }
623 #endif
624
625         if (mac_control->stats_mem) {
626                 pci_free_consistent(nic->pdev,
627                                     mac_control->stats_mem_sz,
628                                     mac_control->stats_mem,
629                                     mac_control->stats_mem_phy);
630         }
631 }
632
633 /**
634  *  init_nic - Initialization of hardware
635  *  @nic: device private variable
636  *  Description: The function sequentially configures every block
637  *  of the H/W from their reset values.
638  *  Return Value:  SUCCESS on success and
639  *  '-1' on failure (endian settings incorrect).
640  */
641
642 static int init_nic(struct s2io_nic *nic)
643 {
644         XENA_dev_config_t __iomem *bar0 = nic->bar0;
645         struct net_device *dev = nic->dev;
646         register u64 val64 = 0;
647         void __iomem *add;
648         u32 time;
649         int i, j;
650         mac_info_t *mac_control;
651         struct config_param *config;
652         int mdio_cnt = 0, dtx_cnt = 0;
653         unsigned long long mem_share;
654         int mem_size;
655
656         mac_control = &nic->mac_control;
657         config = &nic->config;
658
659         /* Set the swapper control on the card */
660         if(s2io_set_swapper(nic)) {
661                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
662                 return -1;
663         }
664
665         /* Remove XGXS from reset state */
666         val64 = 0;
667         writeq(val64, &bar0->sw_reset);
668         msleep(500);
669         val64 = readq(&bar0->sw_reset);
670
671         /*  Enable Receiving broadcasts */
672         add = &bar0->mac_cfg;
673         val64 = readq(&bar0->mac_cfg);
674         val64 |= MAC_RMAC_BCAST_ENABLE;
675         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
676         writel((u32) val64, add);
677         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
678         writel((u32) (val64 >> 32), (add + 4));
679
680         /* Read registers in all blocks */
681         val64 = readq(&bar0->mac_int_mask);
682         val64 = readq(&bar0->mc_int_mask);
683         val64 = readq(&bar0->xgxs_int_mask);
684
685         /*  Set MTU */
686         val64 = dev->mtu;
687         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
688
689         /*
690          * Configuring the XAUI Interface of Xena.
691          * ***************************************
692          * To configure Xena's XAUI, one has to write a series
693          * of 64-bit values into two registers in a particular
694          * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
695          * which is placed in the arrays of configuration values
696          * (default_dtx_cfg & default_mdio_cfg) at appropriate places
697          * to switch writing from one register to another. We continue
698          * writing these values until we encounter the 'END_SIGN' macro.
699          * For example, after making a series of 21 writes into the
700          * dtx_control register the 'SWITCH_SIGN' appears and hence we
701          * start writing into mdio_control until we encounter END_SIGN.
702          */
703         while (1) {
704               dtx_cfg:
705                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
706                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
707                                 dtx_cnt++;
708                                 goto mdio_cfg;
709                         }
710                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
711                                           &bar0->dtx_control, UF);
712                         val64 = readq(&bar0->dtx_control);
713                         dtx_cnt++;
714                 }
715               mdio_cfg:
716                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
717                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
718                                 mdio_cnt++;
719                                 goto dtx_cfg;
720                         }
721                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
722                                           &bar0->mdio_control, UF);
723                         val64 = readq(&bar0->mdio_control);
724                         mdio_cnt++;
725                 }
726                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
727                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
728                         break;
729                 } else {
730                         goto dtx_cfg;
731                 }
732         }
733
734         /*  Tx DMA Initialization */
735         val64 = 0;
736         writeq(val64, &bar0->tx_fifo_partition_0);
737         writeq(val64, &bar0->tx_fifo_partition_1);
738         writeq(val64, &bar0->tx_fifo_partition_2);
739         writeq(val64, &bar0->tx_fifo_partition_3);
740
741
742         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
743                 val64 |=
744                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
745                          13) | vBIT(config->tx_cfg[i].fifo_priority,
746                                     ((i * 32) + 5), 3);
747
748                 if (i == (config->tx_fifo_num - 1)) {
749                         if (i % 2 == 0)
750                                 i++;
751                 }
752
753                 switch (i) {
754                 case 1:
755                         writeq(val64, &bar0->tx_fifo_partition_0);
756                         val64 = 0;
757                         break;
758                 case 3:
759                         writeq(val64, &bar0->tx_fifo_partition_1);
760                         val64 = 0;
761                         break;
762                 case 5:
763                         writeq(val64, &bar0->tx_fifo_partition_2);
764                         val64 = 0;
765                         break;
766                 case 7:
767                         writeq(val64, &bar0->tx_fifo_partition_3);
768                         break;
769                 }
770         }
771
772         /* Enable Tx FIFO partition 0. */
773         val64 = readq(&bar0->tx_fifo_partition_0);
774         val64 |= BIT(0);        /* To enable the FIFO partition. */
775         writeq(val64, &bar0->tx_fifo_partition_0);
776
777         /*
778          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
779          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
780          */
781         if (get_xena_rev_id(nic->pdev) < 4)
782                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
783
784         val64 = readq(&bar0->tx_fifo_partition_0);
785         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
786                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
787
788         /*
789          * Initialization of Tx_PA_CONFIG register to ignore packet
790          * integrity checking.
791          */
792         val64 = readq(&bar0->tx_pa_cfg);
793         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
794             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
795         writeq(val64, &bar0->tx_pa_cfg);
796
797         /* Rx DMA initialization. */
798         val64 = 0;
799         for (i = 0; i < config->rx_ring_num; i++) {
800                 val64 |=
801                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
802                          3);
803         }
804         writeq(val64, &bar0->rx_queue_priority);
805
806         /*
807          * Allocating equal share of memory to all the
808          * configured Rings.
809          */
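        /*
         * For example, with rx_ring_num == 3 the 64 units of memory are
         * split as Q0 = 64/3 + 64%3 = 22 and Q1 = Q2 = 21, so any remainder
         * always goes to queue 0.
         */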
810         val64 = 0;
811         mem_size = 64;
812         for (i = 0; i < config->rx_ring_num; i++) {
813                 switch (i) {
814                 case 0:
815                         mem_share = (mem_size / config->rx_ring_num +
816                                      mem_size % config->rx_ring_num);
817                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
818                         continue;
819                 case 1:
820                         mem_share = (mem_size / config->rx_ring_num);
821                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
822                         continue;
823                 case 2:
824                         mem_share = (mem_size / config->rx_ring_num);
825                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
826                         continue;
827                 case 3:
828                         mem_share = (mem_size / config->rx_ring_num);
829                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
830                         continue;
831                 case 4:
832                         mem_share = (mem_size / config->rx_ring_num);
833                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
834                         continue;
835                 case 5:
836                         mem_share = (mem_size / config->rx_ring_num);
837                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
838                         continue;
839                 case 6:
840                         mem_share = (mem_size / config->rx_ring_num);
841                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
842                         continue;
843                 case 7:
844                         mem_share = (mem_size / config->rx_ring_num);
845                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
846                         continue;
847                 }
848         }
849         writeq(val64, &bar0->rx_queue_cfg);
850
851         /*
852          * Filling Tx round robin registers
853          * as per the number of FIFOs
854          */
855         switch (config->tx_fifo_num) {
856         case 1:
857                 val64 = 0x0000000000000000ULL;
858                 writeq(val64, &bar0->tx_w_round_robin_0);
859                 writeq(val64, &bar0->tx_w_round_robin_1);
860                 writeq(val64, &bar0->tx_w_round_robin_2);
861                 writeq(val64, &bar0->tx_w_round_robin_3);
862                 writeq(val64, &bar0->tx_w_round_robin_4);
863                 break;
864         case 2:
865                 val64 = 0x0000010000010000ULL;
866                 writeq(val64, &bar0->tx_w_round_robin_0);
867                 val64 = 0x0100000100000100ULL;
868                 writeq(val64, &bar0->tx_w_round_robin_1);
869                 val64 = 0x0001000001000001ULL;
870                 writeq(val64, &bar0->tx_w_round_robin_2);
871                 val64 = 0x0000010000010000ULL;
872                 writeq(val64, &bar0->tx_w_round_robin_3);
873                 val64 = 0x0100000000000000ULL;
874                 writeq(val64, &bar0->tx_w_round_robin_4);
875                 break;
876         case 3:
877                 val64 = 0x0001000102000001ULL;
878                 writeq(val64, &bar0->tx_w_round_robin_0);
879                 val64 = 0x0001020000010001ULL;
880                 writeq(val64, &bar0->tx_w_round_robin_1);
881                 val64 = 0x0200000100010200ULL;
882                 writeq(val64, &bar0->tx_w_round_robin_2);
883                 val64 = 0x0001000102000001ULL;
884                 writeq(val64, &bar0->tx_w_round_robin_3);
885                 val64 = 0x0001020000000000ULL;
886                 writeq(val64, &bar0->tx_w_round_robin_4);
887                 break;
888         case 4:
889                 val64 = 0x0001020300010200ULL;
890                 writeq(val64, &bar0->tx_w_round_robin_0);
891                 val64 = 0x0100000102030001ULL;
892                 writeq(val64, &bar0->tx_w_round_robin_1);
893                 val64 = 0x0200010000010203ULL;
894                 writeq(val64, &bar0->tx_w_round_robin_2);
895                 val64 = 0x0001020001000001ULL;
896                 writeq(val64, &bar0->tx_w_round_robin_3);
897                 val64 = 0x0203000100000000ULL;
898                 writeq(val64, &bar0->tx_w_round_robin_4);
899                 break;
900         case 5:
901                 val64 = 0x0001000203000102ULL;
902                 writeq(val64, &bar0->tx_w_round_robin_0);
903                 val64 = 0x0001020001030004ULL;
904                 writeq(val64, &bar0->tx_w_round_robin_1);
905                 val64 = 0x0001000203000102ULL;
906                 writeq(val64, &bar0->tx_w_round_robin_2);
907                 val64 = 0x0001020001030004ULL;
908                 writeq(val64, &bar0->tx_w_round_robin_3);
909                 val64 = 0x0001000000000000ULL;
910                 writeq(val64, &bar0->tx_w_round_robin_4);
911                 break;
912         case 6:
913                 val64 = 0x0001020304000102ULL;
914                 writeq(val64, &bar0->tx_w_round_robin_0);
915                 val64 = 0x0304050001020001ULL;
916                 writeq(val64, &bar0->tx_w_round_robin_1);
917                 val64 = 0x0203000100000102ULL;
918                 writeq(val64, &bar0->tx_w_round_robin_2);
919                 val64 = 0x0304000102030405ULL;
920                 writeq(val64, &bar0->tx_w_round_robin_3);
921                 val64 = 0x0001000200000000ULL;
922                 writeq(val64, &bar0->tx_w_round_robin_4);
923                 break;
924         case 7:
925                 val64 = 0x0001020001020300ULL;
926                 writeq(val64, &bar0->tx_w_round_robin_0);
927                 val64 = 0x0102030400010203ULL;
928                 writeq(val64, &bar0->tx_w_round_robin_1);
929                 val64 = 0x0405060001020001ULL;
930                 writeq(val64, &bar0->tx_w_round_robin_2);
931                 val64 = 0x0304050000010200ULL;
932                 writeq(val64, &bar0->tx_w_round_robin_3);
933                 val64 = 0x0102030000000000ULL;
934                 writeq(val64, &bar0->tx_w_round_robin_4);
935                 break;
936         case 8:
937                 val64 = 0x0001020300040105ULL;
938                 writeq(val64, &bar0->tx_w_round_robin_0);
939                 val64 = 0x0200030106000204ULL;
940                 writeq(val64, &bar0->tx_w_round_robin_1);
941                 val64 = 0x0103000502010007ULL;
942                 writeq(val64, &bar0->tx_w_round_robin_2);
943                 val64 = 0x0304010002060500ULL;
944                 writeq(val64, &bar0->tx_w_round_robin_3);
945                 val64 = 0x0103020400000000ULL;
946                 writeq(val64, &bar0->tx_w_round_robin_4);
947                 break;
948         }
949
950         /* Filling the Rx round robin registers as per the
951          * number of Rings and steering based on QoS.
952          */
953         switch (config->rx_ring_num) {
954         case 1:
955                 val64 = 0x8080808080808080ULL;
956                 writeq(val64, &bar0->rts_qos_steering);
957                 break;
958         case 2:
959                 val64 = 0x0000010000010000ULL;
960                 writeq(val64, &bar0->rx_w_round_robin_0);
961                 val64 = 0x0100000100000100ULL;
962                 writeq(val64, &bar0->rx_w_round_robin_1);
963                 val64 = 0x0001000001000001ULL;
964                 writeq(val64, &bar0->rx_w_round_robin_2);
965                 val64 = 0x0000010000010000ULL;
966                 writeq(val64, &bar0->rx_w_round_robin_3);
967                 val64 = 0x0100000000000000ULL;
968                 writeq(val64, &bar0->rx_w_round_robin_4);
969
970                 val64 = 0x8080808040404040ULL;
971                 writeq(val64, &bar0->rts_qos_steering);
972                 break;
973         case 3:
974                 val64 = 0x0001000102000001ULL;
975                 writeq(val64, &bar0->rx_w_round_robin_0);
976                 val64 = 0x0001020000010001ULL;
977                 writeq(val64, &bar0->rx_w_round_robin_1);
978                 val64 = 0x0200000100010200ULL;
979                 writeq(val64, &bar0->rx_w_round_robin_2);
980                 val64 = 0x0001000102000001ULL;
981                 writeq(val64, &bar0->rx_w_round_robin_3);
982                 val64 = 0x0001020000000000ULL;
983                 writeq(val64, &bar0->rx_w_round_robin_4);
984
985                 val64 = 0x8080804040402020ULL;
986                 writeq(val64, &bar0->rts_qos_steering);
987                 break;
988         case 4:
989                 val64 = 0x0001020300010200ULL;
990                 writeq(val64, &bar0->rx_w_round_robin_0);
991                 val64 = 0x0100000102030001ULL;
992                 writeq(val64, &bar0->rx_w_round_robin_1);
993                 val64 = 0x0200010000010203ULL;
994                 writeq(val64, &bar0->rx_w_round_robin_2);
995                 val64 = 0x0001020001000001ULL;  
996                 writeq(val64, &bar0->rx_w_round_robin_3);
997                 val64 = 0x0203000100000000ULL;
998                 writeq(val64, &bar0->rx_w_round_robin_4);
999
1000                 val64 = 0x8080404020201010ULL;
1001                 writeq(val64, &bar0->rts_qos_steering);
1002                 break;
1003         case 5:
1004                 val64 = 0x0001000203000102ULL;
1005                 writeq(val64, &bar0->rx_w_round_robin_0);
1006                 val64 = 0x0001020001030004ULL;
1007                 writeq(val64, &bar0->rx_w_round_robin_1);
1008                 val64 = 0x0001000203000102ULL;
1009                 writeq(val64, &bar0->rx_w_round_robin_2);
1010                 val64 = 0x0001020001030004ULL;
1011                 writeq(val64, &bar0->rx_w_round_robin_3);
1012                 val64 = 0x0001000000000000ULL;
1013                 writeq(val64, &bar0->rx_w_round_robin_4);
1014
1015                 val64 = 0x8080404020201008ULL;
1016                 writeq(val64, &bar0->rts_qos_steering);
1017                 break;
1018         case 6:
1019                 val64 = 0x0001020304000102ULL;
1020                 writeq(val64, &bar0->rx_w_round_robin_0);
1021                 val64 = 0x0304050001020001ULL;
1022                 writeq(val64, &bar0->rx_w_round_robin_1);
1023                 val64 = 0x0203000100000102ULL;
1024                 writeq(val64, &bar0->rx_w_round_robin_2);
1025                 val64 = 0x0304000102030405ULL;
1026                 writeq(val64, &bar0->rx_w_round_robin_3);
1027                 val64 = 0x0001000200000000ULL;
1028                 writeq(val64, &bar0->rx_w_round_robin_4);
1029
1030                 val64 = 0x8080404020100804ULL;
1031                 writeq(val64, &bar0->rts_qos_steering);
1032                 break;
1033         case 7:
1034                 val64 = 0x0001020001020300ULL;
1035                 writeq(val64, &bar0->rx_w_round_robin_0);
1036                 val64 = 0x0102030400010203ULL;
1037                 writeq(val64, &bar0->rx_w_round_robin_1);
1038                 val64 = 0x0405060001020001ULL;
1039                 writeq(val64, &bar0->rx_w_round_robin_2);
1040                 val64 = 0x0304050000010200ULL;
1041                 writeq(val64, &bar0->rx_w_round_robin_3);
1042                 val64 = 0x0102030000000000ULL;
1043                 writeq(val64, &bar0->rx_w_round_robin_4);
1044
1045                 val64 = 0x8080402010080402ULL;
1046                 writeq(val64, &bar0->rts_qos_steering);
1047                 break;
1048         case 8:
1049                 val64 = 0x0001020300040105ULL;
1050                 writeq(val64, &bar0->rx_w_round_robin_0);
1051                 val64 = 0x0200030106000204ULL;
1052                 writeq(val64, &bar0->rx_w_round_robin_1);
1053                 val64 = 0x0103000502010007ULL;
1054                 writeq(val64, &bar0->rx_w_round_robin_2);
1055                 val64 = 0x0304010002060500ULL;
1056                 writeq(val64, &bar0->rx_w_round_robin_3);
1057                 val64 = 0x0103020400000000ULL;
1058                 writeq(val64, &bar0->rx_w_round_robin_4);
1059
1060                 val64 = 0x8040201008040201ULL;
1061                 writeq(val64, &bar0->rts_qos_steering);
1062                 break;
1063         }
1064
1065         /* UDP Fix */
1066         val64 = 0;
1067         for (i = 0; i < 8; i++)
1068                 writeq(val64, &bar0->rts_frm_len_n[i]);
1069
1070         /* Set the default rts frame length for the rings configured */
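        /*
         * The extra 22 bytes presumably cover the 14-byte Ethernet header,
         * a 4-byte VLAN tag and the 4-byte FCS on top of the MTU.
         */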
1071         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1072         for (i = 0 ; i < config->rx_ring_num ; i++)
1073                 writeq(val64, &bar0->rts_frm_len_n[i]);
1074
1075         /* Set the frame length for the configured rings
1076          * desired by the user
1077          */
1078         for (i = 0; i < config->rx_ring_num; i++) {
1079                 /* If rts_frm_len[i] == 0 then it is assumed that the user
1080                  * has not specified frame length steering.
1081                  * If the user provides a frame length then program
1082                  * the rts_frm_len register with that value, or else
1083                  * leave it as it is.
1084                  */
1085                 if (rts_frm_len[i] != 0) {
1086                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1087                                 &bar0->rts_frm_len_n[i]);
1088                 }
1089         }
1090
1091         /* Program statistics memory */
1092         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1093
1094         /*
1095          * Initializing the sampling rate for the device to calculate the
1096          * bandwidth utilization.
1097          */
1098         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1099             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1100         writeq(val64, &bar0->mac_link_util);
1101
1102
1103         /*
1104          * Initializing the Transmit and Receive Traffic Interrupt
1105          * Scheme.
1106          */
1107         /*
1108          * TTI Initialization. Default Tx timer gets us about
1109          * 250 interrupts per sec. Continuous interrupts are enabled
1110          * by default.
1111          */
1112         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1113             TTI_DATA1_MEM_TX_URNG_A(0xA) |
1114             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1115             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1116         if (use_continuous_tx_intrs)
1117                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1118         writeq(val64, &bar0->tti_data1_mem);
1119
1120         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1121             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1122             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1123         writeq(val64, &bar0->tti_data2_mem);
1124
1125         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1126         writeq(val64, &bar0->tti_command_mem);
1127
1128         /*
1129          * Once the operation completes, the Strobe bit of the command
1130          * register will be reset. We poll for this particular condition.
1131          * We wait for a maximum of 500 ms for the operation to complete;
1132          * if it's not complete by then we return an error.
1133          */
1134         time = 0;
1135         while (TRUE) {
1136                 val64 = readq(&bar0->tti_command_mem);
1137                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1138                         break;
1139                 }
1140                 if (time > 10) {
1141                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1142                                   dev->name);
1143                         return -1;
1144                 }
1145                 msleep(50);
1146                 time++;
1147         }
1148
1149         /* RTI Initialization */
1150         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1151             RTI_DATA1_MEM_RX_URNG_A(0xA) |
1152             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1153             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1154
1155         writeq(val64, &bar0->rti_data1_mem);
1156
1157         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1158             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1159             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1160         writeq(val64, &bar0->rti_data2_mem);
1161
1162         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1163         writeq(val64, &bar0->rti_command_mem);
1164
1165         /*
1166          * Once the operation completes, the Strobe bit of the
1167          * command register will be reset. We poll for this
1168          * particular condition. We wait for a maximum of 500ms
1169          * for the operation to complete; if it's not complete
1170          * by then we return an error.
1171          */
1172         time = 0;
1173         while (TRUE) {
1174                 val64 = readq(&bar0->rti_command_mem);
1175                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1176                         break;
1177                 }
1178                 if (time > 10) {
1179                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1180                                   dev->name);
1181                         return -1;
1182                 }
1183                 time++;
1184                 msleep(50);
1185         }
1186
1187         /*
1188          * Initializing proper Pause threshold values for all
1189          * the 8 Queues on the Rx side.
1190          */
1191         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1192         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1193
1194         /* Disable RMAC PAD STRIPPING */
1195         add = (void *) &bar0->mac_cfg;
1196         val64 = readq(&bar0->mac_cfg);
1197         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1198         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1199         writel((u32) (val64), add);
1200         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1201         writel((u32) (val64 >> 32), (add + 4));
1202         val64 = readq(&bar0->mac_cfg);
1203
1204         /*
1205          * Set the time value to be inserted in the pause frame
1206          * generated by xena.
1207          */
1208         val64 = readq(&bar0->rmac_pause_cfg);
1209         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1210         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1211         writeq(val64, &bar0->rmac_pause_cfg);
1212
1213         /*
1214          * Set the Threshold Limit for Generating the pause frame.
1215          * If the amount of data in any Queue exceeds the ratio
1216          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1217          * a pause frame is generated.
1218          */
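        /*
         * With the default mc_pause_threshold_q0q3 of 187, for example, the
         * threshold works out to roughly 187/256 ~= 73% of a queue's share
         * of memory.
         */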
1219         val64 = 0;
1220         for (i = 0; i < 4; i++) {
1221                 val64 |=
1222                     (((u64) 0xFF00 | nic->mac_control.
1223                       mc_pause_threshold_q0q3)
1224                      << (i * 2 * 8));
1225         }
1226         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1227
1228         val64 = 0;
1229         for (i = 0; i < 4; i++) {
1230                 val64 |=
1231                     (((u64) 0xFF00 | nic->mac_control.
1232                       mc_pause_threshold_q4q7)
1233                      << (i * 2 * 8));
1234         }
1235         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1236
1237         /*
1238          * TxDMA will stop issuing read requests if the number of read
1239          * splits exceeds the limit set by shared_splits.
1240          */
1241         val64 = readq(&bar0->pic_control);
1242         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1243         writeq(val64, &bar0->pic_control);
1244
1245         return SUCCESS;
1246 }
1247
1248 /**
1249  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1250  *  @nic: device private variable,
1251  *  @mask: A mask indicating which Intr block must be modified, and
1252  *  @flag: A flag indicating whether to enable or disable the Intrs.
1253  *  Description: This function will either disable or enable the interrupts
1254  *  depending on the flag argument. The mask argument can be used to
1255  *  enable/disable any Intr block.
1256  *  Return Value: NONE.
1257  */
1258
1259 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1260 {
1261         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1262         register u64 val64 = 0, temp64 = 0;
1263
1264         /*  Top level interrupt classification */
1265         /*  PIC Interrupts */
1266         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1267                 /*  Enable PIC Intrs in the general intr mask register */
1268                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1269                 if (flag == ENABLE_INTRS) {
1270                         temp64 = readq(&bar0->general_int_mask);
1271                         temp64 &= ~((u64) val64);
1272                         writeq(temp64, &bar0->general_int_mask);
1273                         /*
1274                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1275                          * interrupts for now.
1276                          * TODO
1277                          */
1278                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1279                         /*
1280                          * No MSI Support is available presently, so TTI and
1281                          * RTI interrupts are also disabled.
1282                          */
1283                 } else if (flag == DISABLE_INTRS) {
1284                         /*
1285                          * Disable PIC Intrs in the general
1286                          * intr mask register
1287                          */
1288                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1289                         temp64 = readq(&bar0->general_int_mask);
1290                         val64 |= temp64;
1291                         writeq(val64, &bar0->general_int_mask);
1292                 }
1293         }
1294
1295         /*  DMA Interrupts */
1296         /*  Enabling/Disabling Tx DMA interrupts */
1297         if (mask & TX_DMA_INTR) {
1298                 /* Enable TxDMA Intrs in the general intr mask register */
1299                 val64 = TXDMA_INT_M;
1300                 if (flag == ENABLE_INTRS) {
1301                         temp64 = readq(&bar0->general_int_mask);
1302                         temp64 &= ~((u64) val64);
1303                         writeq(temp64, &bar0->general_int_mask);
1304                         /*
1305                          * Keep all interrupts other than PFC interrupt
1306                          * and PCC interrupt disabled in DMA level.
1307                          */
1308                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1309                                                       TXDMA_PCC_INT_M);
1310                         writeq(val64, &bar0->txdma_int_mask);
1311                         /*
1312                          * Enable only the MISC error 1 interrupt in PFC block
1313                          */
1314                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1315                         writeq(val64, &bar0->pfc_err_mask);
1316                         /*
1317                          * Enable only the FB_ECC error interrupt in PCC block
1318                          */
1319                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1320                         writeq(val64, &bar0->pcc_err_mask);
1321                 } else if (flag == DISABLE_INTRS) {
1322                         /*
1323                          * Disable TxDMA Intrs in the general intr mask
1324                          * register
1325                          */
1326                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1327                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1328                         temp64 = readq(&bar0->general_int_mask);
1329                         val64 |= temp64;
1330                         writeq(val64, &bar0->general_int_mask);
1331                 }
1332         }
1333
1334         /*  Enabling/Disabling Rx DMA interrupts */
1335         if (mask & RX_DMA_INTR) {
1336                 /*  Enable RxDMA Intrs in the general intr mask register */
1337                 val64 = RXDMA_INT_M;
1338                 if (flag == ENABLE_INTRS) {
1339                         temp64 = readq(&bar0->general_int_mask);
1340                         temp64 &= ~((u64) val64);
1341                         writeq(temp64, &bar0->general_int_mask);
1342                         /*
1343                          * All RxDMA block interrupts are disabled for now
1344                          * TODO
1345                          */
1346                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1347                 } else if (flag == DISABLE_INTRS) {
1348                         /*
1349                          * Disable RxDMA Intrs in the general intr mask
1350                          * register
1351                          */
1352                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1353                         temp64 = readq(&bar0->general_int_mask);
1354                         val64 |= temp64;
1355                         writeq(val64, &bar0->general_int_mask);
1356                 }
1357         }
1358
1359         /*  MAC Interrupts */
1360         /*  Enabling/Disabling MAC interrupts */
1361         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1362                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1363                 if (flag == ENABLE_INTRS) {
1364                         temp64 = readq(&bar0->general_int_mask);
1365                         temp64 &= ~((u64) val64);
1366                         writeq(temp64, &bar0->general_int_mask);
1367                         /*
1368                          * All MAC block error interrupts are disabled for now
1369                          * except the link status change interrupt.
1370                          * TODO
1371                          */
1372                         val64 = MAC_INT_STATUS_RMAC_INT;
1373                         temp64 = readq(&bar0->mac_int_mask);
1374                         temp64 &= ~((u64) val64);
1375                         writeq(temp64, &bar0->mac_int_mask);
1376
1377                         val64 = readq(&bar0->mac_rmac_err_mask);
1378                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1379                         writeq(val64, &bar0->mac_rmac_err_mask);
1380                 } else if (flag == DISABLE_INTRS) {
1381                         /*
1382                          * Disable MAC Intrs in the general intr mask register
1383                          */
1384                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1385                         writeq(DISABLE_ALL_INTRS,
1386                                &bar0->mac_rmac_err_mask);
1387
1388                         temp64 = readq(&bar0->general_int_mask);
1389                         val64 |= temp64;
1390                         writeq(val64, &bar0->general_int_mask);
1391                 }
1392         }
1393
1394         /*  XGXS Interrupts */
1395         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1396                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1397                 if (flag == ENABLE_INTRS) {
1398                         temp64 = readq(&bar0->general_int_mask);
1399                         temp64 &= ~((u64) val64);
1400                         writeq(temp64, &bar0->general_int_mask);
1401                         /*
1402                          * All XGXS block error interrupts are disabled for now
1403                          * TODO
1404                          */
1405                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1406                 } else if (flag == DISABLE_INTRS) {
1407                         /*
1408                          * Disable XGXS Intrs in the general intr mask register
1409                          */
1410                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1411                         temp64 = readq(&bar0->general_int_mask);
1412                         val64 |= temp64;
1413                         writeq(val64, &bar0->general_int_mask);
1414                 }
1415         }
1416
1417         /*  Memory Controller(MC) interrupts */
1418         if (mask & MC_INTR) {
1419                 val64 = MC_INT_M;
1420                 if (flag == ENABLE_INTRS) {
1421                         temp64 = readq(&bar0->general_int_mask);
1422                         temp64 &= ~((u64) val64);
1423                         writeq(temp64, &bar0->general_int_mask);
1424                         /*
1425                          * Enable all MC Intrs.
1426                          */
1427                         writeq(0x0, &bar0->mc_int_mask);
1428                         writeq(0x0, &bar0->mc_err_mask);
1429                 } else if (flag == DISABLE_INTRS) {
1430                         /*
1431                          * Disable MC Intrs in the general intr mask register
1432                          */
1433                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1434                         temp64 = readq(&bar0->general_int_mask);
1435                         val64 |= temp64;
1436                         writeq(val64, &bar0->general_int_mask);
1437                 }
1438         }
1439
1440
1441         /*  Tx traffic interrupts */
1442         if (mask & TX_TRAFFIC_INTR) {
1443                 val64 = TXTRAFFIC_INT_M;
1444                 if (flag == ENABLE_INTRS) {
1445                         temp64 = readq(&bar0->general_int_mask);
1446                         temp64 &= ~((u64) val64);
1447                         writeq(temp64, &bar0->general_int_mask);
1448                         /*
1449                          * Enable all the Tx side interrupts;
1450                          * writing 0 enables all 64 TX interrupt levels.
1451                          */
1452                         writeq(0x0, &bar0->tx_traffic_mask);
1453                 } else if (flag == DISABLE_INTRS) {
1454                         /*
1455                          * Disable Tx Traffic Intrs in the general intr mask
1456                          * register.
1457                          */
1458                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1459                         temp64 = readq(&bar0->general_int_mask);
1460                         val64 |= temp64;
1461                         writeq(val64, &bar0->general_int_mask);
1462                 }
1463         }
1464
1465         /*  Rx traffic interrupts */
1466         if (mask & RX_TRAFFIC_INTR) {
1467                 val64 = RXTRAFFIC_INT_M;
1468                 if (flag == ENABLE_INTRS) {
1469                         temp64 = readq(&bar0->general_int_mask);
1470                         temp64 &= ~((u64) val64);
1471                         writeq(temp64, &bar0->general_int_mask);
1472                         /* writing 0 enables all 8 RX interrupt levels */
1473                         writeq(0x0, &bar0->rx_traffic_mask);
1474                 } else if (flag == DISABLE_INTRS) {
1475                         /*
1476                          * Disable Rx Traffic Intrs in the general intr mask
1477                          * register.
1478                          */
1479                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1480                         temp64 = readq(&bar0->general_int_mask);
1481                         val64 |= temp64;
1482                         writeq(val64, &bar0->general_int_mask);
1483                 }
1484         }
1485 }
1486
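/*
 * Helper for verify_xena_quiescence(): checks the PCC-idle and PRC-quiescent
 * bits of the adapter status word.  Adapters with revision >= 4 report PCC
 * idle through ADAPTER_STATUS_RMAC_PCC_IDLE, older revisions through
 * ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE, and the expected state differs
 * depending on whether the adapter enable bit was ever written before
 * (the 'flag' argument).
 */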
1487 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1488 {
1489         int ret = 0;
1490
1491         if (flag == FALSE) {
1492                 if (rev_id >= 4) {
1493                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1494                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1495                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1496                                 ret = 1;
1497                         }
1498                 } else {
1499                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1500                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1501                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1502                                 ret = 1;
1503                         }
1504                 }
1505         } else {
1506                 if (rev_id >= 4) {
1507                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1508                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1509                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1510                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1511                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1512                                 ret = 1;
1513                         }
1514                 } else {
1515                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1516                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1517                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1518                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1519                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1520                                 ret = 1;
1521                         }
1522                 }
1523         }
1524
1525         return ret;
1526 }
1527 /**
1528  *  verify_xena_quiescence - Checks whether the H/W is ready
1529  *  @val64 :  Value read from adapter status register.
1530  *  @flag : indicates if the adapter enable bit was ever written once
1531  *  before.
1532  *  Description: Returns whether the H/W is ready to go or not. The
1533  *  comparison differs depending on whether the adapter enable bit has
1534  *  been written before, and the calling function passes the flag
1535  *  argument to indicate this.
1536  *  Return: 1 if Xena is quiescent
1537  *          0 if Xena is not quiescent
1538  */
1539
1540 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1541 {
1542         int ret = 0;
1543         u64 tmp64 = ~((u64) val64);
1544         int rev_id = get_xena_rev_id(sp->pdev);
1545
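        /*
         * tmp64 is the bitwise complement of the status word, so a set bit
         * in tmp64 means the corresponding READY/LOCK bit is clear in the
         * adapter status.  All of the bits tested below must therefore be
         * set before the PRC/PCC state is examined.
         */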
1546         if (!
1547             (tmp64 &
1548              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1549               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1550               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1551               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1552               ADAPTER_STATUS_P_PLL_LOCK))) {
1553                 ret = check_prc_pcc_state(val64, flag, rev_id);
1554         }
1555
1556         return ret;
1557 }
1558
1559 /**
1560  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1561  * @sp: Pointer to device specific structure
1562  * Description :
1563  * New procedure to clear mac address reading problems on Alpha platforms
1564  *
1565  */
1566
1567 void fix_mac_address(nic_t * sp)
1568 {
1569         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1570         u64 val64;
1571         int i = 0;
1572
1573         while (fix_mac[i] != END_SIGN) {
1574                 writeq(fix_mac[i++], &bar0->gpio_control);
1575                 udelay(10);
1576                 val64 = readq(&bar0->gpio_control);
1577         }
1578 }
1579
1580 /**
1581  *  start_nic - Turns the device on
1582  *  @nic : device private variable.
1583  *  Description:
1584  *  This function actually turns the device on. Before this function is
1585  *  called, all registers are configured from their reset states
1586  *  and shared memory is allocated but the NIC is still quiescent. On
1587  *  calling this function, the device interrupts are cleared and the NIC is
1588  *  literally switched on by writing into the adapter control register.
1589  *  Return Value:
1590  *  SUCCESS on success and -1 on failure.
1591  */
1592
1593 static int start_nic(struct s2io_nic *nic)
1594 {
1595         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1596         struct net_device *dev = nic->dev;
1597         register u64 val64 = 0;
1598         u16 interruptible;
1599         u16 subid, i;
1600         mac_info_t *mac_control;
1601         struct config_param *config;
1602
1603         mac_control = &nic->mac_control;
1604         config = &nic->config;
1605
1606         /*  PRC Initialization and configuration */
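        /*
         * For every configured Rx ring, program the DMA address of its
         * first RxD block into prc_rxd0_n[] and enable the ring's receive
         * controller (in 3-descriptor ring mode when the driver is built
         * with CONFIG_2BUFF_MODE).
         */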
1607         for (i = 0; i < config->rx_ring_num; i++) {
1608                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1609                        &bar0->prc_rxd0_n[i]);
1610
1611                 val64 = readq(&bar0->prc_ctrl_n[i]);
1612 #ifndef CONFIG_2BUFF_MODE
1613                 val64 |= PRC_CTRL_RC_ENABLED;
1614 #else
1615                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1616 #endif
1617                 writeq(val64, &bar0->prc_ctrl_n[i]);
1618         }
1619
1620 #ifdef CONFIG_2BUFF_MODE
1621         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1622         val64 = readq(&bar0->rx_pa_cfg);
1623         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1624         writeq(val64, &bar0->rx_pa_cfg);
1625 #endif
1626
1627         /*
1628          * Enabling MC-RLDRAM. After enabling the device, we wait
1629          * for around 100ms, which is approximately the time required
1630          * for the device to be ready for operation.
1631          */
1632         val64 = readq(&bar0->mc_rldram_mrs);
1633         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1634         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1635         val64 = readq(&bar0->mc_rldram_mrs);
1636
1637         msleep(100);    /* Delay by around 100 ms. */
1638
1639         /* Enabling ECC Protection. */
1640         val64 = readq(&bar0->adapter_control);
1641         val64 &= ~ADAPTER_ECC_EN;
1642         writeq(val64, &bar0->adapter_control);
1643
1644         /*
1645          * Clearing any possible Link state change interrupts that
1646          * could have popped up just before Enabling the card.
1647          */
1648         val64 = readq(&bar0->mac_rmac_err_reg);
1649         if (val64)
1650                 writeq(val64, &bar0->mac_rmac_err_reg);
1651
1652         /*
1653          * Verify if the device is ready to be enabled, if so enable
1654          * it.
1655          */
1656         val64 = readq(&bar0->adapter_status);
1657         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1658                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1659                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1660                           (unsigned long long) val64);
1661                 return FAILURE;
1662         }
1663
1664         /*  Enable select interrupts */
1665         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1666             RX_MAC_INTR | MC_INTR;
1667         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1668
1669         /*
1670          * With some switches, link might be already up at this point.
1671          * Because of this weird behavior, when we enable laser,
1672          * we may not get link. We need to handle this. We cannot
1673          * figure out which switch is misbehaving. So we are forced to
1674          * make a global change.
1675          */
1676
1677         /* Enabling Laser. */
1678         val64 = readq(&bar0->adapter_control);
1679         val64 |= ADAPTER_EOI_TX_ON;
1680         writeq(val64, &bar0->adapter_control);
1681
1682         /* SXE-002: Initialize link and activity LED */
1683         subid = nic->pdev->subsystem_device;
1684         if ((subid & 0xFF) >= 0x07) {
1685                 val64 = readq(&bar0->gpio_control);
1686                 val64 |= 0x0000800000000000ULL;
1687                 writeq(val64, &bar0->gpio_control);
1688                 val64 = 0x0411040400000000ULL;
1689                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1690         }
1691
1692         /*
1693          * Don't see link state interrupts on certain switches, so
1694          * directly scheduling a link state task from here.
1695          */
1696         schedule_work(&nic->set_link_task);
1697
1698         return SUCCESS;
1699 }
1700
1701 /**
1702  *  free_tx_buffers - Free all queued Tx buffers
1703  *  @nic : device private variable.
1704  *  Description:
1705  *  Free all queued Tx buffers.
1706  *  Return Value: void
1707 */
1708
1709 static void free_tx_buffers(struct s2io_nic *nic)
1710 {
1711         struct net_device *dev = nic->dev;
1712         struct sk_buff *skb;
1713         TxD_t *txdp;
1714         int i, j, k;
1715         mac_info_t *mac_control;
1716         struct config_param *config;
1717         int cnt = 0, frg_cnt;
1718
1719         mac_control = &nic->mac_control;
1720         config = &nic->config;
1721
1722         for (i = 0; i < config->tx_fifo_num; i++) {
1723                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1724                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1725                             list_virt_addr;
1726                         skb =
1727                             (struct sk_buff *) ((unsigned long) txdp->
1728                                                 Host_Control);
1729                         if (skb == NULL) {
1730                                 memset(txdp, 0, sizeof(TxD_t) *
1731                                        config->max_txds);
1732                                 continue;
1733                         }
1734                         frg_cnt = skb_shinfo(skb)->nr_frags;
1735                         pci_unmap_single(nic->pdev, (dma_addr_t)
1736                                          txdp->Buffer_Pointer,
1737                                          skb->len - skb->data_len,
1738                                          PCI_DMA_TODEVICE);
1739                         if (frg_cnt) {
1740                                 TxD_t *temp;
1741                                 temp = txdp;
1742                                 txdp++;
1743                                 for (k = 0; k < frg_cnt; k++, txdp++) {
1744                                         skb_frag_t *frag =
1745                                             &skb_shinfo(skb)->frags[k];
1746                                         pci_unmap_page(nic->pdev,
1747                                                        (dma_addr_t)
1748                                                        txdp->
1749                                                        Buffer_Pointer,
1750                                                        frag->size,
1751                                                        PCI_DMA_TODEVICE);
1752                                 }
1753                                 txdp = temp;
1754                         }
1755                         dev_kfree_skb(skb);
1756                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1757                         cnt++;
1758                 }
1759                 DBG_PRINT(INTR_DBG,
1760                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1761                           dev->name, cnt, i);
1762                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1763                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1764         }
1765 }
1766
1767 /**
1768  *   stop_nic -  To stop the nic
1769  *   @nic : device private variable.
1770  *   Description:
1771  *   This function does exactly the opposite of what the start_nic()
1772  *   function does. This function is called to stop the device.
1773  *   Return Value:
1774  *   void.
1775  */
1776
1777 static void stop_nic(struct s2io_nic *nic)
1778 {
1779         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1780         register u64 val64 = 0;
1781         u16 interruptible, i;
1782         mac_info_t *mac_control;
1783         struct config_param *config;
1784
1785         mac_control = &nic->mac_control;
1786         config = &nic->config;
1787
1788         /*  Disable all interrupts */
1789         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1790             RX_MAC_INTR | MC_INTR;
1791         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1792
1793         /*  Disable PRCs */
1794         for (i = 0; i < config->rx_ring_num; i++) {
1795                 val64 = readq(&bar0->prc_ctrl_n[i]);
1796                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1797                 writeq(val64, &bar0->prc_ctrl_n[i]);
1798         }
1799 }
1800
1801 /**
1802  *  fill_rx_buffers - Allocates the Rx side skbs
1803  *  @nic:  device private variable
1804  *  @ring_no: ring number
1805  *  Description:
1806  *  The function allocates Rx side skbs and puts the physical
1807  *  address of these buffers into the RxD buffer pointers, so that the NIC
1808  *  can DMA the received frame into these locations.
1809  *  The NIC supports 3 receive modes, viz
1810  *  1. single buffer,
1811  *  2. three buffer and
1812  *  3. five buffer modes.
1813  *  Each mode defines how many fragments the received frame will be split
1814  *  up into by the NIC. In three buffer mode the frame is split into L3
1815  *  header, L4 header and L4 payload; in five buffer mode the L4 payload
1816  *  itself is split into 3 fragments. As of now only single buffer mode is
1817  *  supported.
1818  *   Return Value:
1819  *  SUCCESS on success or an appropriate -ve value on failure.
1820  */
1821
1822 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1823 {
1824         struct net_device *dev = nic->dev;
1825         struct sk_buff *skb;
1826         RxD_t *rxdp;
1827         int off, off1, size, block_no, block_no1;
1828         int offset, offset1;
1829         u32 alloc_tab = 0;
1830         u32 alloc_cnt;
1831         mac_info_t *mac_control;
1832         struct config_param *config;
1833 #ifdef CONFIG_2BUFF_MODE
1834         RxD_t *rxdpnext;
1835         int nextblk;
1836         u64 tmp;
1837         buffAdd_t *ba;
1838         dma_addr_t rxdpphys;
1839 #endif
1840 #ifndef CONFIG_S2IO_NAPI
1841         unsigned long flags;
1842 #endif
1843
1844         mac_control = &nic->mac_control;
1845         config = &nic->config;
1846         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1847             atomic_read(&nic->rx_bufs_left[ring_no]);
1848         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1849             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1850
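        /*
         * Replenish descriptors until the ring is full again.  On each pass
         * the put pointer (where the host adds fresh buffers) is compared
         * with the get pointer (where received frames are next picked up);
         * replenishing stops when the two meet or when the NIC still owns
         * the descriptor.  In 1-buffer mode every block carries one extra
         * END_OF_BLOCK descriptor that links to the next block, hence the
         * (MAX_RXDS_PER_BLOCK + 1) stride in the offset arithmetic.
         */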
1851         while (alloc_tab < alloc_cnt) {
1852                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1853                     block_index;
1854                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1855                     block_index;
1856                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1857                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1858 #ifndef CONFIG_2BUFF_MODE
1859                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1860                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1861 #else
1862                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1863                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1864 #endif
1865
1866                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1867                     block_virt_addr + off;
1868                 if ((offset == offset1) && (rxdp->Host_Control)) {
1869                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1870                         DBG_PRINT(INTR_DBG, " info equated\n");
1871                         goto end;
1872                 }
1873 #ifndef CONFIG_2BUFF_MODE
1874                 if (rxdp->Control_1 == END_OF_BLOCK) {
1875                         mac_control->rings[ring_no].rx_curr_put_info.
1876                             block_index++;
1877                         mac_control->rings[ring_no].rx_curr_put_info.
1878                             block_index %= mac_control->rings[ring_no].block_count;
1879                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1880                                 block_index;
1881                         off++;
1882                         off %= (MAX_RXDS_PER_BLOCK + 1);
1883                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1884                             off;
1885                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1886                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1887                                   dev->name, rxdp);
1888                 }
1889 #ifndef CONFIG_S2IO_NAPI
1890                 spin_lock_irqsave(&nic->put_lock, flags);
1891                 mac_control->rings[ring_no].put_pos =
1892                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1893                 spin_unlock_irqrestore(&nic->put_lock, flags);
1894 #endif
1895 #else
1896                 if (rxdp->Host_Control == END_OF_BLOCK) {
1897                         mac_control->rings[ring_no].rx_curr_put_info.
1898                             block_index++;
1899                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1900                             %= mac_control->rings[ring_no].block_count;
1901                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1902                             .block_index;
1903                         off = 0;
1904                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1905                                   dev->name, block_no,
1906                                   (unsigned long long) rxdp->Control_1);
1907                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1908                             off;
1909                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1910                             block_virt_addr;
1911                 }
1912 #ifndef CONFIG_S2IO_NAPI
1913                 spin_lock_irqsave(&nic->put_lock, flags);
1914                 mac_control->rings[ring_no].put_pos = (block_no *
1915                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1916                 spin_unlock_irqrestore(&nic->put_lock, flags);
1917 #endif
1918 #endif
1919
1920 #ifndef CONFIG_2BUFF_MODE
1921                 if (rxdp->Control_1 & RXD_OWN_XENA)
1922 #else
1923                 if (rxdp->Control_2 & BIT(0))
1924 #endif
1925                 {
1926                         mac_control->rings[ring_no].rx_curr_put_info.
1927                             offset = off;
1928                         goto end;
1929                 }
1930 #ifdef  CONFIG_2BUFF_MODE
1931                 /*
1932                  * RxDs Spanning cache lines will be replenished only
1933                  * if the succeeding RxD is also owned by Host. It
1934                  * will always be the ((8*i)+3) and ((8*i)+6)
1935                  * descriptors for the 48 byte descriptor. The offending
1936                  * descriptor is of course the 3rd descriptor.
1937                  */
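                /*
                 * Worked example, assuming cache-line aligned blocks and the
                 * 48-byte RxD / 128-byte line implied above: descriptor
                 * starts fall at 48*j % 128 = 0, 48, 96, 16, 64, 112, 32,
                 * 80, ... so only the descriptors at offsets 96 and 112
                 * (the 3rd and 6th in every group of eight) satisfy the
                 * "% 128 > 80" test below.
                 */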
1938                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1939                     block_dma_addr + (off * sizeof(RxD_t));
1940                 if (((u64) (rxdpphys)) % 128 > 80) {
1941                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1942                             block_virt_addr + (off + 1);
1943                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1944                                 nextblk = (block_no + 1) %
1945                                     (mac_control->rings[ring_no].block_count);
1946                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1947                                     [nextblk].block_virt_addr;
1948                         }
1949                         if (rxdpnext->Control_2 & BIT(0))
1950                                 goto end;
1951                 }
1952 #endif
1953
1954 #ifndef CONFIG_2BUFF_MODE
1955                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1956 #else
1957                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1958 #endif
1959                 if (!skb) {
1960                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1961                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1962                         return -ENOMEM;
1963                 }
1964 #ifndef CONFIG_2BUFF_MODE
1965                 skb_reserve(skb, NET_IP_ALIGN);
1966                 memset(rxdp, 0, sizeof(RxD_t));
1967                 rxdp->Buffer0_ptr = pci_map_single
1968                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1969                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1970                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1971                 rxdp->Host_Control = (unsigned long) (skb);
1972                 rxdp->Control_1 |= RXD_OWN_XENA;
1973                 off++;
1974                 off %= (MAX_RXDS_PER_BLOCK + 1);
1975                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1976 #else
1977                 ba = &mac_control->rings[ring_no].ba[block_no][off];
1978                 skb_reserve(skb, BUF0_LEN);
1979                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1980                 if (tmp)
1981                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1982
1983                 memset(rxdp, 0, sizeof(RxD_t));
1984                 rxdp->Buffer2_ptr = pci_map_single
1985                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1986                      PCI_DMA_FROMDEVICE);
1987                 rxdp->Buffer0_ptr =
1988                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1989                                    PCI_DMA_FROMDEVICE);
1990                 rxdp->Buffer1_ptr =
1991                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1992                                    PCI_DMA_FROMDEVICE);
1993
1994                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1995                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1996                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1997                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
1998                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1999                 rxdp->Control_1 |= RXD_OWN_XENA;
2000                 off++;
2001                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2002 #endif
2003                 rxdp->Control_2 |= SET_RXD_MARKER;
2004
2005                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2006                 alloc_tab++;
2007         }
2008
2009       end:
2010         return SUCCESS;
2011 }
2012
2013 /**
2014  *  free_rx_buffers - Frees all Rx buffers
2015  *  @sp: device private variable.
2016  *  Description:
2017  *  This function will free all Rx buffers allocated by host.
2018  *  Return Value:
2019  *  NONE.
2020  */
2021
2022 static void free_rx_buffers(struct s2io_nic *sp)
2023 {
2024         struct net_device *dev = sp->dev;
2025         int i, j, blk = 0, off, buf_cnt = 0;
2026         RxD_t *rxdp;
2027         struct sk_buff *skb;
2028         mac_info_t *mac_control;
2029         struct config_param *config;
2030 #ifdef CONFIG_2BUFF_MODE
2031         buffAdd_t *ba;
2032 #endif
2033
2034         mac_control = &sp->mac_control;
2035         config = &sp->config;
2036
2037         for (i = 0; i < config->rx_ring_num; i++) {
2038                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2039                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2040                         rxdp = mac_control->rings[i].rx_blocks[blk].
2041                                 block_virt_addr + off;
2042
2043 #ifndef CONFIG_2BUFF_MODE
2044                         if (rxdp->Control_1 == END_OF_BLOCK) {
2045                                 rxdp =
2046                                     (RxD_t *) ((unsigned long) rxdp->
2047                                                Control_2);
2048                                 j++;
2049                                 blk++;
2050                         }
2051 #else
2052                         if (rxdp->Host_Control == END_OF_BLOCK) {
2053                                 blk++;
2054                                 continue;
2055                         }
2056 #endif
2057
2058                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2059                                 memset(rxdp, 0, sizeof(RxD_t));
2060                                 continue;
2061                         }
2062
2063                         skb =
2064                             (struct sk_buff *) ((unsigned long) rxdp->
2065                                                 Host_Control);
2066                         if (skb) {
2067 #ifndef CONFIG_2BUFF_MODE
2068                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2069                                                  rxdp->Buffer0_ptr,
2070                                                  dev->mtu +
2071                                                  HEADER_ETHERNET_II_802_3_SIZE
2072                                                  + HEADER_802_2_SIZE +
2073                                                  HEADER_SNAP_SIZE,
2074                                                  PCI_DMA_FROMDEVICE);
2075 #else
2076                                 ba = &mac_control->rings[i].ba[blk][off];
2077                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2078                                                  rxdp->Buffer0_ptr,
2079                                                  BUF0_LEN,
2080                                                  PCI_DMA_FROMDEVICE);
2081                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2082                                                  rxdp->Buffer1_ptr,
2083                                                  BUF1_LEN,
2084                                                  PCI_DMA_FROMDEVICE);
2085                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2086                                                  rxdp->Buffer2_ptr,
2087                                                  dev->mtu + BUF0_LEN + 4,
2088                                                  PCI_DMA_FROMDEVICE);
2089 #endif
2090                                 dev_kfree_skb(skb);
2091                                 atomic_dec(&sp->rx_bufs_left[i]);
2092                                 buf_cnt++;
2093                         }
2094                         memset(rxdp, 0, sizeof(RxD_t));
2095                 }
2096                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2097                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2098                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2099                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2100                 atomic_set(&sp->rx_bufs_left[i], 0);
2101                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2102                           dev->name, buf_cnt, i);
2103         }
2104 }
2105
2106 /**
2107  * s2io_poll - Rx interrupt handler for NAPI support
2108  * @dev : pointer to the device structure.
2109  * @budget : The number of packets that were budgeted to be processed
2110  * during one pass through the 'Poll' function.
2111  * Description:
2112  * Comes into the picture only if NAPI support has been incorporated. It does
2113  * the same thing that rx_intr_handler does, but not in an interrupt context;
2114  * also, it will process only a given number of packets.
2115  * Return value:
2116  * 0 on success and 1 if there are No Rx packets to be processed.
2117  */
2118
2119 #if defined(CONFIG_S2IO_NAPI)
2120 static int s2io_poll(struct net_device *dev, int *budget)
2121 {
2122         nic_t *nic = dev->priv;
2123         int pkt_cnt = 0, org_pkts_to_process;
2124         mac_info_t *mac_control;
2125         struct config_param *config;
2126         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2127         u64 val64;
2128         int i;
2129
2130         atomic_inc(&nic->isr_cnt);
2131         mac_control = &nic->mac_control;
2132         config = &nic->config;
2133
2134         nic->pkts_to_process = *budget;
2135         if (nic->pkts_to_process > dev->quota)
2136                 nic->pkts_to_process = dev->quota;
2137         org_pkts_to_process = nic->pkts_to_process;
2138
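        /*
         * NAPI budget accounting: pkts_to_process starts at the smaller of
         * *budget and dev->quota and is decremented by rx_intr_handler()
         * for every frame delivered.  The difference gives the number of
         * packets actually processed, which is charged against both the
         * device quota and the global budget further down.
         */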
2139         val64 = readq(&bar0->rx_traffic_int);
2140         writeq(val64, &bar0->rx_traffic_int);
2141
2142         for (i = 0; i < config->rx_ring_num; i++) {
2143                 rx_intr_handler(&mac_control->rings[i]);
2144                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2145                 if (!nic->pkts_to_process) {
2146                         /* Quota for the current iteration has been met */
2147                         goto no_rx;
2148                 }
2149         }
2150         if (!pkt_cnt)
2151                 pkt_cnt = 1;
2152
2153         dev->quota -= pkt_cnt;
2154         *budget -= pkt_cnt;
2155         netif_rx_complete(dev);
2156
2157         for (i = 0; i < config->rx_ring_num; i++) {
2158                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2159                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2160                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2161                         break;
2162                 }
2163         }
2164         /* Re-enable the Rx interrupts. */
2165         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2166         atomic_dec(&nic->isr_cnt);
2167         return 0;
2168
2169 no_rx:
2170         dev->quota -= pkt_cnt;
2171         *budget -= pkt_cnt;
2172
2173         for (i = 0; i < config->rx_ring_num; i++) {
2174                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2175                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2176                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2177                         break;
2178                 }
2179         }
2180         atomic_dec(&nic->isr_cnt);
2181         return 1;
2182 }
2183 #endif
2184
2185 /**
2186  *  rx_intr_handler - Rx interrupt handler
2187  *  @ring_data: per-ring data structure of the ring being serviced.
2188  *  Description:
2189  *  If the interrupt is because of a received frame or if the
2190  *  receive ring contains fresh, as yet un-processed frames, this function is
2191  *  called. It picks out the RxD at which the last Rx processing had
2192  *  stopped, sends the skb to the OSM's Rx handler and then increments
2193  *  the offset.
2194  *  Return Value:
2195  *  NONE.
2196  */
2197 static void rx_intr_handler(ring_info_t *ring_data)
2198 {
2199         nic_t *nic = ring_data->nic;
2200         struct net_device *dev = (struct net_device *) nic->dev;
2201         int get_block, get_offset, put_block, put_offset, ring_bufs;
2202         rx_curr_get_info_t get_info, put_info;
2203         RxD_t *rxdp;
2204         struct sk_buff *skb;
2205 #ifndef CONFIG_S2IO_NAPI
2206         int pkt_cnt = 0;
2207 #endif
2208         spin_lock(&nic->rx_lock);
2209         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2210                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2211                           __FUNCTION__, dev->name);
2212                 spin_unlock(&nic->rx_lock);
                return;
2213         }
2214
2215         get_info = ring_data->rx_curr_get_info;
2216         get_block = get_info.block_index;
2217         put_info = ring_data->rx_curr_put_info;
2218         put_block = put_info.block_index;
2219         ring_bufs = get_info.ring_len+1;
2220         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2221                     get_info.offset;
2222         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2223                 get_info.offset;
2224 #ifndef CONFIG_S2IO_NAPI
2225         spin_lock(&nic->put_lock);
2226         put_offset = ring_data->put_pos;
2227         spin_unlock(&nic->put_lock);
2228 #else
2229         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2230                 put_info.offset;
2231 #endif
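        /*
         * Walk the ring from the last processed descriptor: keep handing
         * frames to rx_osm_handler() while the descriptor has been returned
         * by the NIC (RXD_IS_UP2DT) and the get pointer has not caught up
         * with the put pointer.
         */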
2232         while (RXD_IS_UP2DT(rxdp) &&
2233                (((get_offset + 1) % ring_bufs) != put_offset)) {
2234                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2235                 if (skb == NULL) {
2236                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2237                                   dev->name);
2238                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2239                         spin_unlock(&nic->rx_lock);
2240                         return;
2241                 }
2242 #ifndef CONFIG_2BUFF_MODE
2243                 pci_unmap_single(nic->pdev, (dma_addr_t)
2244                                  rxdp->Buffer0_ptr,
2245                                  dev->mtu +
2246                                  HEADER_ETHERNET_II_802_3_SIZE +
2247                                  HEADER_802_2_SIZE +
2248                                  HEADER_SNAP_SIZE,
2249                                  PCI_DMA_FROMDEVICE);
2250 #else
2251                 pci_unmap_single(nic->pdev, (dma_addr_t)
2252                                  rxdp->Buffer0_ptr,
2253                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2254                 pci_unmap_single(nic->pdev, (dma_addr_t)
2255                                  rxdp->Buffer1_ptr,
2256                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2257                 pci_unmap_single(nic->pdev, (dma_addr_t)
2258                                  rxdp->Buffer2_ptr,
2259                                  dev->mtu + BUF0_LEN + 4,
2260                                  PCI_DMA_FROMDEVICE);
2261 #endif
2262                 rx_osm_handler(ring_data, rxdp);
2263                 get_info.offset++;
2264                 ring_data->rx_curr_get_info.offset =
2265                     get_info.offset;
2266                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2267                     get_info.offset;
2268                 if (get_info.offset &&
2269                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2270                         get_info.offset = 0;
2271                         ring_data->rx_curr_get_info.offset
2272                             = get_info.offset;
2273                         get_block++;
2274                         get_block %= ring_data->block_count;
2275                         ring_data->rx_curr_get_info.block_index
2276                             = get_block;
2277                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2278                 }
2279
2280                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2281                             get_info.offset;
2282 #ifdef CONFIG_S2IO_NAPI
2283                 nic->pkts_to_process -= 1;
2284                 if (!nic->pkts_to_process)
2285                         break;
2286 #else
2287                 pkt_cnt++;
2288                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2289                         break;
2290 #endif
2291         }
2292         spin_unlock(&nic->rx_lock);
2293 }
2294
2295 /**
2296  *  tx_intr_handler - Transmit interrupt handler
2297  *  @fifo_data : per-FIFO data structure of the FIFO being serviced
2298  *  Description:
2299  *  If an interrupt was raised to indicate DMA completion of a
2300  *  Tx packet, this function is called. It identifies the last TxD
2301  *  whose buffer was freed and frees all skbs whose data have already
2302  *  been DMA'ed into the NIC's internal memory.
2303  *  Return Value:
2304  *  NONE
2305  */
2306
2307 static void tx_intr_handler(fifo_info_t *fifo_data)
2308 {
2309         nic_t *nic = fifo_data->nic;
2310         struct net_device *dev = (struct net_device *) nic->dev;
2311         tx_curr_get_info_t get_info, put_info;
2312         struct sk_buff *skb;
2313         TxD_t *txdlp;
2314         u16 j, frg_cnt;
2315
2316         get_info = fifo_data->tx_curr_get_info;
2317         put_info = fifo_data->tx_curr_put_info;
2318         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2319             list_virt_addr;
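        /*
         * Free completed transmit descriptors: walk the FIFO from the get
         * pointer until either a TxD still owned by the NIC is found or the
         * put pointer is reached, unmapping the head buffer and any
         * fragment pages before releasing the skb.
         */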
2320         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2321                (get_info.offset != put_info.offset) &&
2322                (txdlp->Host_Control)) {
2323                 /* Check for TxD errors */
2324                 if (txdlp->Control_1 & TXD_T_CODE) {
2325                         unsigned long long err;
2326                         err = txdlp->Control_1 & TXD_T_CODE;
2327                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2328                                   err);
2329                 }
2330
2331                 skb = (struct sk_buff *) ((unsigned long)
2332                                 txdlp->Host_Control);
2333                 if (skb == NULL) {
2334                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2335                         __FUNCTION__);
2336                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2337                         return;
2338                 }
2339
2340                 frg_cnt = skb_shinfo(skb)->nr_frags;
2341                 nic->tx_pkt_count++;
2342
2343                 pci_unmap_single(nic->pdev, (dma_addr_t)
2344                                  txdlp->Buffer_Pointer,
2345                                  skb->len - skb->data_len,
2346                                  PCI_DMA_TODEVICE);
2347                 if (frg_cnt) {
2348                         TxD_t *temp;
2349                         temp = txdlp;
2350                         txdlp++;
2351                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2352                                 skb_frag_t *frag =
2353                                     &skb_shinfo(skb)->frags[j];
2354                                 pci_unmap_page(nic->pdev,
2355                                                (dma_addr_t)
2356                                                txdlp->
2357                                                Buffer_Pointer,
2358                                                frag->size,
2359                                                PCI_DMA_TODEVICE);
2360                         }
2361                         txdlp = temp;
2362                 }
2363                 memset(txdlp, 0,
2364                        (sizeof(TxD_t) * fifo_data->max_txds));
2365
2366                 /* Updating the statistics block */
2367                 nic->stats.tx_bytes += skb->len;
2368                 dev_kfree_skb_irq(skb);
2369
2370                 get_info.offset++;
2371                 get_info.offset %= get_info.fifo_len + 1;
2372                 txdlp = (TxD_t *) fifo_data->list_info
2373                     [get_info.offset].list_virt_addr;
2374                 fifo_data->tx_curr_get_info.offset =
2375                     get_info.offset;
2376         }
2377
2378         spin_lock(&nic->tx_lock);
2379         if (netif_queue_stopped(dev))
2380                 netif_wake_queue(dev);
2381         spin_unlock(&nic->tx_lock);
2382 }
2383
2384 /**
2385  *  alarm_intr_handler - Alarm Interrupt handler
2386  *  @nic: device private variable
2387  *  Description: If the interrupt was neither because of an Rx packet nor a Tx
2388  *  completion, this function is called. If the interrupt indicates
2389  *  a loss of link, the OSM link status handler is invoked; for any other
2390  *  alarm interrupt the block that raised the interrupt is reported
2391  *  and a H/W reset is issued.
2392  *  Return Value:
2393  *  NONE
2394 */
2395
2396 static void alarm_intr_handler(struct s2io_nic *nic)
2397 {
2398         struct net_device *dev = (struct net_device *) nic->dev;
2399         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2400         register u64 val64 = 0, err_reg = 0;
2401
2402         /* Handling link status change error Intr */
2403         err_reg = readq(&bar0->mac_rmac_err_reg);
2404         writeq(err_reg, &bar0->mac_rmac_err_reg);
2405         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2406                 schedule_work(&nic->set_link_task);
2407         }
2408
2409         /* Handling ECC errors */
2410         val64 = readq(&bar0->mc_err_reg);
2411         writeq(val64, &bar0->mc_err_reg);
2412         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2413                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2414                         nic->mac_control.stats_info->sw_stat.
2415                                 double_ecc_errs++;
2416                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2417                                   dev->name);
2418                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2419                         netif_stop_queue(dev);
2420                         schedule_work(&nic->rst_timer_task);
2421                 } else {
2422                         nic->mac_control.stats_info->sw_stat.
2423                                 single_ecc_errs++;
2424                 }
2425         }
2426
2427         /* In case of a serious error, the device will be Reset. */
2428         val64 = readq(&bar0->serr_source);
2429         if (val64 & SERR_SOURCE_ANY) {
2430                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2431                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2432                 netif_stop_queue(dev);
2433                 schedule_work(&nic->rst_timer_task);
2434         }
2435
2436         /*
2437          * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2438          * error occurs, the adapter will be recycled by disabling the
2439          * adapter enable bit and enabling it again after the device
2440          * becomes quiescent.
2441          */
2442         val64 = readq(&bar0->pcc_err_reg);
2443         writeq(val64, &bar0->pcc_err_reg);
2444         if (val64 & PCC_FB_ECC_DB_ERR) {
2445                 u64 ac = readq(&bar0->adapter_control);
2446                 ac &= ~(ADAPTER_CNTL_EN);
2447                 writeq(ac, &bar0->adapter_control);
2448                 ac = readq(&bar0->adapter_control);
2449                 schedule_work(&nic->set_link_task);
2450         }
2451
2452         /* Other type of interrupts are not being handled now,  TODO */
2453 }
2454
2455 /**
2456  *  wait_for_cmd_complete - waits for a command to complete.
2457  *  @sp : private member of the device structure, which is a pointer to the
2458  *  s2io_nic structure.
2459  *  Description: Function that waits for a command written into the RMAC
2460  *  ADDR/DATA registers to complete and returns either success or
2461  *  error depending on whether the command completed or not.
2462  *  Return value:
2463  *   SUCCESS on success and FAILURE on failure.
2464  */
2465
2466 int wait_for_cmd_complete(nic_t * sp)
2467 {
2468         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2469         int ret = FAILURE, cnt = 0;
2470         u64 val64;
2471
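        /*
         * Poll the strobe bit roughly every 50 ms and give up after about
         * ten attempts (~500 ms), reporting FAILURE in that case.
         */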
2472         while (TRUE) {
2473                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2474                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2475                         ret = SUCCESS;
2476                         break;
2477                 }
2478                 msleep(50);
2479                 if (cnt++ > 10)
2480                         break;
2481         }
2482
2483         return ret;
2484 }
2485
2486 /**
2487  *  s2io_reset - Resets the card.
2488  *  @sp : private member of the device structure.
2489  *  Description: Function to Reset the card. This function then also
2490  *  restores the previously saved PCI configuration space registers as
2491  *  the card reset also resets the configuration space.
2492  *  Return value:
2493  *  void.
2494  */
2495
2496 void s2io_reset(nic_t * sp)
2497 {
2498         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2499         u64 val64;
2500         u16 subid, pci_cmd;
2501
2502         val64 = SW_RESET_ALL;
2503         writeq(val64, &bar0->sw_reset);
2504
2505         /*
2506          * At this stage, if the PCI write is indeed completed, the
2507          * card is reset and so is the PCI Config space of the device.
2508          * So a read cannot be issued at this stage on any of the
2509          * registers to ensure the write into "sw_reset" register
2510          * has gone through.
2511          * Question: Is there any system call that will explicitly force
2512          * all the write commands still pending on the bus to be pushed
2513          * through?
2514          * As of now I am just giving a 250ms delay and hoping that the
2515          * PCI write to sw_reset register is done by this time.
2516          */
2517         msleep(250);
2518
2519         /* Restore the PCI state saved during initialization. */
2520         pci_restore_state(sp->pdev);
2521
2522         s2io_init_pci(sp);
2523
2524         msleep(250);
2525
2526         /* Set swapper to enable I/O register access */
2527         s2io_set_swapper(sp);
2528
2529         /* Clear certain PCI/PCI-X fields after reset */
2530         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2531         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2532         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2533
2534         val64 = readq(&bar0->txpic_int_reg);
2535         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2536         writeq(val64, &bar0->txpic_int_reg);
2537
2538         /* Clearing PCIX Ecc status register */
2539         pci_write_config_dword(sp->pdev, 0x68, 0);
2540
2541         /* Reset device statistics maintained by OS */
2542         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2543
2544         /* SXE-002: Configure link and activity LED to turn it off */
2545         subid = sp->pdev->subsystem_device;
2546         if ((subid & 0xFF) >= 0x07) {
2547                 val64 = readq(&bar0->gpio_control);
2548                 val64 |= 0x0000800000000000ULL;
2549                 writeq(val64, &bar0->gpio_control);
2550                 val64 = 0x0411040400000000ULL;
2551                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2552         }
2553
2554         sp->device_enabled_once = FALSE;
2555 }
2556
2557 /**
2558  *  s2io_set_swapper - to set the swapper control on the card
2559  *  @sp : private member of the device structure,
2560  *  pointer to the s2io_nic structure.
2561  *  Description: Function to set the swapper control on the card
2562  *  correctly depending on the 'endianness' of the system.
2563  *  Return value:
2564  *  SUCCESS on success and FAILURE on failure.
2565  */
2566
2567 int s2io_set_swapper(nic_t * sp)
2568 {
2569         struct net_device *dev = sp->dev;
2570         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2571         u64 val64, valt, valr;
2572
2573         /*
2574          * Set proper endian settings and verify the same by reading
2575          * the PIF Feed-back register.
2576          */
2577
2578         val64 = readq(&bar0->pif_rd_swapper_fb);
2579         if (val64 != 0x0123456789ABCDEFULL) {
2580                 int i = 0;
2581                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2582                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2583                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2584                                 0};                     /* FE=0, SE=0 */
2585
2586                 while(i<4) {
2587                         writeq(value[i], &bar0->swapper_ctrl);
2588                         val64 = readq(&bar0->pif_rd_swapper_fb);
2589                         if (val64 == 0x0123456789ABCDEFULL)
2590                                 break;
2591                         i++;
2592                 }
2593                 if (i == 4) {
2594                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2595                                 dev->name);
2596                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2597                                 (unsigned long long) val64);
2598                         return FAILURE;
2599                 }
2600                 valr = value[i];
2601         } else {
2602                 valr = readq(&bar0->swapper_ctrl);
2603         }
2604
2605         valt = 0x0123456789ABCDEFULL;
2606         writeq(valt, &bar0->xmsi_address);
2607         val64 = readq(&bar0->xmsi_address);
2608
2609         if(val64 != valt) {
2610                 int i = 0;
2611                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2612                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2613                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2614                                 0};                     /* FE=0, SE=0 */
2615
2616                 while(i<4) {
2617                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2618                         writeq(valt, &bar0->xmsi_address);
2619                         val64 = readq(&bar0->xmsi_address);
2620                         if(val64 == valt)
2621                                 break;
2622                         i++;
2623                 }
2624                 if(i == 4) {
2625                         unsigned long long x = val64;
2626                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2627                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2628                         return FAILURE;
2629                 }
2630         }
2631         val64 = readq(&bar0->swapper_ctrl);
2632         val64 &= 0xFFFF000000000000ULL;
2633
2634 #ifdef  __BIG_ENDIAN
2635         /*
2636          * The device is set to a big endian format by default, so a
2637          * big endian driver need not set anything.
2638          */
2639         val64 |= (SWAPPER_CTRL_TXP_FE |
2640                  SWAPPER_CTRL_TXP_SE |
2641                  SWAPPER_CTRL_TXD_R_FE |
2642                  SWAPPER_CTRL_TXD_W_FE |
2643                  SWAPPER_CTRL_TXF_R_FE |
2644                  SWAPPER_CTRL_RXD_R_FE |
2645                  SWAPPER_CTRL_RXD_W_FE |
2646                  SWAPPER_CTRL_RXF_W_FE |
2647                  SWAPPER_CTRL_XMSI_FE |
2648                  SWAPPER_CTRL_XMSI_SE |
2649                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2650         writeq(val64, &bar0->swapper_ctrl);
2651 #else
2652         /*
2653          * Initially we enable all bits to make it accessible by the
2654          * driver, then we selectively enable only those bits that
2655          * we want to set.
2656          */
2657         val64 |= (SWAPPER_CTRL_TXP_FE |
2658                  SWAPPER_CTRL_TXP_SE |
2659                  SWAPPER_CTRL_TXD_R_FE |
2660                  SWAPPER_CTRL_TXD_R_SE |
2661                  SWAPPER_CTRL_TXD_W_FE |
2662                  SWAPPER_CTRL_TXD_W_SE |
2663                  SWAPPER_CTRL_TXF_R_FE |
2664                  SWAPPER_CTRL_RXD_R_FE |
2665                  SWAPPER_CTRL_RXD_R_SE |
2666                  SWAPPER_CTRL_RXD_W_FE |
2667                  SWAPPER_CTRL_RXD_W_SE |
2668                  SWAPPER_CTRL_RXF_W_FE |
2669                  SWAPPER_CTRL_XMSI_FE |
2670                  SWAPPER_CTRL_XMSI_SE |
2671                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2672         writeq(val64, &bar0->swapper_ctrl);
2673 #endif
2674         val64 = readq(&bar0->swapper_ctrl);
2675
2676         /*
2677          * Verifying if endian settings are accurate by reading a
2678          * feedback register.
2679          */
2680         val64 = readq(&bar0->pif_rd_swapper_fb);
2681         if (val64 != 0x0123456789ABCDEFULL) {
2682                 /* Endian settings are incorrect, calls for another dekko. */
2683                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2684                           dev->name);
2685                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2686                           (unsigned long long) val64);
2687                 return FAILURE;
2688         }
2689
2690         return SUCCESS;
2691 }
2692
2693 /* ********************************************************* *
2694  * Functions defined below concern the OS part of the driver *
2695  * ********************************************************* */
2696
2697 /**
2698  *  s2io_open - open entry point of the driver
2699  *  @dev : pointer to the device structure.
2700  *  Description:
2701  *  This function is the open entry point of the driver. It mainly calls a
2702  *  function to allocate Rx buffers and inserts them into the buffer
2703  *  descriptors and then enables the Rx part of the NIC.
2704  *  Return value:
2705  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2706  *   file on failure.
2707  */
2708
2709 int s2io_open(struct net_device *dev)
2710 {
2711         nic_t *sp = dev->priv;
2712         int err = 0;
2713
2714         /*
2715          * Make sure the link is reported as down by default every time
2716          * the NIC is initialized
2717          */
2718         netif_carrier_off(dev);
2719         sp->last_link_state = 0; /* Unknown link state */
2720
2721         /* Initialize H/W and enable interrupts */
2722         if (s2io_card_up(sp)) {
2723                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2724                           dev->name);
2725                 err = -ENODEV;
2726                 goto hw_init_failed;
2727         }
2728
2729         /* After proper initialization of H/W, register ISR */
2730         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2731                           sp->name, dev);
2732         if (err) {
2733                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2734                           dev->name);
2735                 goto isr_registration_failed;
2736         }
2737
2738         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2739                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2740                 err = -ENODEV;
2741                 goto setting_mac_address_failed;
2742         }
2743
2744         netif_start_queue(dev);
2745         return 0;
2746
2747 setting_mac_address_failed:
2748         free_irq(sp->pdev->irq, dev);
2749 isr_registration_failed:
2750         del_timer_sync(&sp->alarm_timer);
2751         s2io_reset(sp);
2752 hw_init_failed:
2753         return err;
2754 }
2755
2756 /**
2757  *  s2io_close -close entry point of the driver
2758  *  @dev : device pointer.
2759  *  Description:
2760  *  This is the stop entry point of the driver. It needs to undo exactly
2761  *  whatever was done by the open entry point, thus it's usually referred to
2762  *  as the close function. Among other things, this function mainly stops the
2763  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2764  *  Return value:
2765  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2766  *  file on failure.
2767  */
2768
2769 int s2io_close(struct net_device *dev)
2770 {
2771         nic_t *sp = dev->priv;
2772         flush_scheduled_work();
2773         netif_stop_queue(dev);
2774         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2775         s2io_card_down(sp);
2776
2777         free_irq(sp->pdev->irq, dev);
2778         sp->device_close_flag = TRUE;   /* Device is shut down. */
2779         return 0;
2780 }
2781
2782 /**
2783  *  s2io_xmit - Tx entry point of the driver
2784  *  @skb : the socket buffer containing the Tx data.
2785  *  @dev : device pointer.
2786  *  Description :
2787  *  This function is the Tx entry point of the driver. S2IO NIC supports
2788  *  certain protocol assist features on the Tx side, namely CSO, S/G and LSO.
2789  *  NOTE: when the device cannot queue the pkt, just the trans_start variable
2790  *  will not be updated.
2791  *  Return value:
2792  *  0 on success & 1 on failure.
2793  */
2794
2795 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2796 {
2797         nic_t *sp = dev->priv;
2798         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2799         register u64 val64;
2800         TxD_t *txdp;
2801         TxFIFO_element_t __iomem *tx_fifo;
2802         unsigned long flags;
2803 #ifdef NETIF_F_TSO
2804         int mss;
2805 #endif
2806         mac_info_t *mac_control;
2807         struct config_param *config;
2808
2809         mac_control = &sp->mac_control;
2810         config = &sp->config;
2811
2812         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2813         spin_lock_irqsave(&sp->tx_lock, flags);
2814         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2815                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2816                           dev->name);
2817                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2818                 dev_kfree_skb(skb);
2819                 return 0;
2820         }
2821
2822         queue = 0;
2823
2824         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2825         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2826         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2827                 list_virt_addr;
2828
2829         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2830         /* Avoid "put" pointer going beyond "get" pointer */
2831         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2832                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2833                 netif_stop_queue(dev);
2834                 dev_kfree_skb(skb);
2835                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2836                 return 0;
2837         }
2838 #ifdef NETIF_F_TSO
2839         mss = skb_shinfo(skb)->tso_size;
2840         if (mss) {
2841                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2842                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2843         }
2844 #endif
2845
2846         frg_cnt = skb_shinfo(skb)->nr_frags;
2847         frg_len = skb->len - skb->data_len;
2848
2849         txdp->Buffer_Pointer = pci_map_single
2850             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
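             /*
              * Stash the skb in Host_Control; a non-zero value also marks this
              * TxD as busy (see the free-TxD check above) until Tx completion
              * frees the skb.
              */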
2851         txdp->Host_Control = (unsigned long) skb;
2852         if (skb->ip_summed == CHECKSUM_HW) {
2853                 txdp->Control_2 |=
2854                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2855                      TXD_TX_CKO_UDP_EN);
2856         }
2857
2858         txdp->Control_2 |= config->tx_intr_type;
2859
2860         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2861                             TXD_GATHER_CODE_FIRST);
2862         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2863
2864         /* For fragmented SKB. */
2865         for (i = 0; i < frg_cnt; i++) {
2866                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2867                 txdp++;
2868                 txdp->Buffer_Pointer = (u64) pci_map_page
2869                     (sp->pdev, frag->page, frag->page_offset,
2870                      frag->size, PCI_DMA_TODEVICE);
2871                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2872         }
2873         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2874
2875         tx_fifo = mac_control->tx_FIFO_start[queue];
2876         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2877         writeq(val64, &tx_fifo->TxDL_Pointer);
2878
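             /*
              * Ensure the TxDL pointer write above is ordered before the
              * List_Control doorbell write below kicks off the transmit.
              */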
2879         wmb();
2880
2881         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2882                  TX_FIFO_LAST_LIST);
2883
2884 #ifdef NETIF_F_TSO
2885         if (mss)
2886                 val64 |= TX_FIFO_SPECIAL_FUNC;
2887 #endif
2888         writeq(val64, &tx_fifo->List_Control);
2889
2890         put_off++;
2891         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2892         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2893
2894         /* Avoid "put" pointer going beyond "get" pointer */
2895         if (((put_off + 1) % queue_len) == get_off) {
2896                 DBG_PRINT(TX_DBG,
2897                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2898                           put_off, get_off);
2899                 netif_stop_queue(dev);
2900         }
2901
2902         dev->trans_start = jiffies;
2903         spin_unlock_irqrestore(&sp->tx_lock, flags);
2904
2905         return 0;
2906 }
2907
2908 static void
2909 s2io_alarm_handle(unsigned long data)
2910 {
2911         nic_t *sp = (nic_t *)data;
2912
2913         alarm_intr_handler(sp);
2914         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
2915 }
2916
2917 /**
2918  *  s2io_isr - ISR handler of the device .
2919  *  @irq: the irq of the device.
2920  *  @dev_id: a void pointer to the dev structure of the NIC.
2921  *  @pt_regs: pointer to the registers pushed on the stack.
2922  *  Description:  This function is the ISR handler of the device. It
2923  *  identifies the reason for the interrupt and calls the relevant
2924  *  service routines. As a contingency measure, this ISR allocates the
2925  *  recv buffers, if their numbers are below the panic value which is
2926  *  presently set to 25% of the original number of rcv buffers allocated.
2927  *  Return value:
2928  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2929  *   IRQ_NONE: will be returned if interrupt is not from our device
2930  */
2931 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2932 {
2933         struct net_device *dev = (struct net_device *) dev_id;
2934         nic_t *sp = dev->priv;
2935         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2936         int i;
2937         u64 reason = 0, val64;
2938         mac_info_t *mac_control;
2939         struct config_param *config;
2940
2941         atomic_inc(&sp->isr_cnt);
2942         mac_control = &sp->mac_control;
2943         config = &sp->config;
2944
2945         /*
2946          * Identify the cause for interrupt and call the appropriate
2947          * interrupt handler. Causes for the interrupt could be;
2948          * 1. Rx of packet.
2949          * 2. Tx complete.
2950          * 3. Link down.
2951          * 4. Error in any functional blocks of the NIC.
2952          */
2953         reason = readq(&bar0->general_int_status);
2954
2955         if (!reason) {
2956                 /* The interrupt was not raised by Xena. */
2957                 atomic_dec(&sp->isr_cnt);
2958                 return IRQ_NONE;
2959         }
2960
2961 #ifdef CONFIG_S2IO_NAPI
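             /*
              * In NAPI mode just mask Rx interrupts and schedule the poll
              * routine; actual Rx processing is done in the poll handler.
              */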
2962         if (reason & GEN_INTR_RXTRAFFIC) {
2963                 if (netif_rx_schedule_prep(dev)) {
2964                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2965                                               DISABLE_INTRS);
2966                         __netif_rx_schedule(dev);
2967                 }
2968         }
2969 #else
2970         /* If Intr is because of Rx Traffic */
2971         if (reason & GEN_INTR_RXTRAFFIC) {
2972                 /*
2973                  * rx_traffic_int reg is an R1 register, writing all 1's
2974          * will ensure that the actual interrupt causing bit gets
2975                  * cleared and hence a read can be avoided.
2976                  */
2977                 val64 = 0xFFFFFFFFFFFFFFFFULL;
2978                 writeq(val64, &bar0->rx_traffic_int);
2979                 for (i = 0; i < config->rx_ring_num; i++) {
2980                         rx_intr_handler(&mac_control->rings[i]);
2981                 }
2982         }
2983 #endif
2984
2985         /* If Intr is because of Tx Traffic */
2986         if (reason & GEN_INTR_TXTRAFFIC) {
2987                 /*
2988                  * tx_traffic_int reg is an R1 register, writing all 1's
2989          * will ensure that the actual interrupt causing bit gets
2990                  * cleared and hence a read can be avoided.
2991                  */
2992                 val64 = 0xFFFFFFFFFFFFFFFFULL;
2993                 writeq(val64, &bar0->tx_traffic_int);
2994
2995                 for (i = 0; i < config->tx_fifo_num; i++)
2996                         tx_intr_handler(&mac_control->fifos[i]);
2997         }
2998
2999         /*
3000          * If the Rx buffer count is below the panic threshold then
3001          * reallocate the buffers from the interrupt handler itself,
3002          * else schedule a tasklet to reallocate the buffers.
3003          */
3004 #ifndef CONFIG_S2IO_NAPI
3005         for (i = 0; i < config->rx_ring_num; i++) {
3006                 int ret;
3007                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3008                 int level = rx_buffer_level(sp, rxb_size, i);
3009
3010                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3011                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3012                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3013                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3014                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3015                                           dev->name);
3016                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3017                                 clear_bit(0, (&sp->tasklet_status));
3018                                 atomic_dec(&sp->isr_cnt);
3019                                 return IRQ_HANDLED;
3020                         }
3021                         clear_bit(0, (&sp->tasklet_status));
3022                 } else if (level == LOW) {
3023                         tasklet_schedule(&sp->task);
3024                 }
3025         }
3026 #endif
3027
3028         atomic_dec(&sp->isr_cnt);
3029         return IRQ_HANDLED;
3030 }
3031
3032 /**
3033  * s2io_updt_stats - triggers a one-shot update of the NIC-maintained statistics.
3034  */
3035 static void s2io_updt_stats(nic_t *sp)
3036 {
3037         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3038         u64 val64;
3039         int cnt = 0;
3040
3041         if (atomic_read(&sp->card_state) == CARD_UP) {
3042                 /* Approx 30us on a 133 MHz bus */
3043                 val64 = SET_UPDT_CLICKS(10) |
3044                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3045                 writeq(val64, &bar0->stat_cfg);
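                     /*
                      * Poll up to 5 times, 100us apart, for the NIC to signal
                      * completion of the one-shot statistics update.
                      */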
3046                 do {
3047                         udelay(100);
3048                         val64 = readq(&bar0->stat_cfg);
3049                         if (!(val64 & BIT(0)))
3050                                 break;
3051                         cnt++;
3052                         if (cnt == 5)
3053                                 break; /* Updt failed */
3054                 } while(1);
3055         }
3056 }
3057
3058 /**
3059  *  s2io_get_stats - Updates the device statistics structure.
3060  *  @dev : pointer to the device structure.
3061  *  Description:
3062  *  This function updates the device statistics structure in the s2io_nic
3063  *  structure and returns a pointer to the same.
3064  *  Return value:
3065  *  pointer to the updated net_device_stats structure.
3066  */
3067
3068 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3069 {
3070         nic_t *sp = dev->priv;
3071         mac_info_t *mac_control;
3072         struct config_param *config;
3073
3074
3075         mac_control = &sp->mac_control;
3076         config = &sp->config;
3077
3078         /* Configure Stats for immediate updt */
3079         s2io_updt_stats(sp);
3080
3081         sp->stats.tx_packets =
3082                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3083         sp->stats.tx_errors =
3084                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3085         sp->stats.rx_errors =
3086                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3087         sp->stats.multicast =
3088                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3089         sp->stats.rx_length_errors =
3090                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3091
3092         return (&sp->stats);
3093 }
3094
3095 /**
3096  *  s2io_set_multicast - entry point for multicast address enable/disable.
3097  *  @dev : pointer to the device structure
3098  *  Description:
3099  *  This function is a driver entry point which gets called by the kernel
3100  *  whenever multicast addresses must be enabled/disabled. This also gets
3101  *  called to set/reset promiscuous mode. Depending on the device flags, we
3102  *  determine whether multicast addresses must be enabled or whether
3103  *  promiscuous mode is to be enabled/disabled, etc.
3104  *  Return value:
3105  *  void.
3106  */
3107
3108 static void s2io_set_multicast(struct net_device *dev)
3109 {
3110         int i, j, prev_cnt;
3111         struct dev_mc_list *mclist;
3112         nic_t *sp = dev->priv;
3113         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3114         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3115             0xfeffffffffffULL;
3116         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3117         void __iomem *add;
3118
3119         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3120                 /*  Enable all Multicast addresses */
3121                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3122                        &bar0->rmac_addr_data0_mem);
3123                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3124                        &bar0->rmac_addr_data1_mem);
3125                 val64 = RMAC_ADDR_CMD_MEM_WE |
3126                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3127                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3128                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3129                 /* Wait till command completes */
3130                 wait_for_cmd_complete(sp);
3131
3132                 sp->m_cast_flg = 1;
3133                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3134         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3135                 /*  Disable all Multicast addresses */
3136                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3137                        &bar0->rmac_addr_data0_mem);
3138                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3139                        &bar0->rmac_addr_data1_mem);
3140                 val64 = RMAC_ADDR_CMD_MEM_WE |
3141                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3142                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3143                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3144                 /* Wait till command completes */
3145                 wait_for_cmd_complete(sp);
3146
3147                 sp->m_cast_flg = 0;
3148                 sp->all_multi_pos = 0;
3149         }
3150
3151         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3152                 /*  Put the NIC into promiscuous mode */
3153                 add = &bar0->mac_cfg;
3154                 val64 = readq(&bar0->mac_cfg);
3155                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3156
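                     /*
                      * mac_cfg is written 32 bits at a time and the
                      * RMAC_CFG_KEY must be rewritten before each half.
                      */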
3157                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3158                 writel((u32) val64, add);
3159                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3160                 writel((u32) (val64 >> 32), (add + 4));
3161
3162                 val64 = readq(&bar0->mac_cfg);
3163                 sp->promisc_flg = 1;
3164                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3165                           dev->name);
3166         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3167                 /*  Remove the NIC from promiscuous mode */
3168                 add = &bar0->mac_cfg;
3169                 val64 = readq(&bar0->mac_cfg);
3170                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3171
3172                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3173                 writel((u32) val64, add);
3174                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3175                 writel((u32) (val64 >> 32), (add + 4));
3176
3177                 val64 = readq(&bar0->mac_cfg);
3178                 sp->promisc_flg = 0;
3179                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3180                           dev->name);
3181         }
3182
3183         /*  Update individual M_CAST address list */
3184         if ((!sp->m_cast_flg) && dev->mc_count) {
3185                 if (dev->mc_count >
3186                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3187                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3188                                   dev->name);
3189                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3190                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3191                         return;
3192                 }
3193
3194                 prev_cnt = sp->mc_addr_count;
3195                 sp->mc_addr_count = dev->mc_count;
3196
3197                 /* Clear out the previous list of Mc in the H/W. */
3198                 for (i = 0; i < prev_cnt; i++) {
3199                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3200                                &bar0->rmac_addr_data0_mem);
3201                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3202                                 &bar0->rmac_addr_data1_mem);
3203                         val64 = RMAC_ADDR_CMD_MEM_WE |
3204                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3205                             RMAC_ADDR_CMD_MEM_OFFSET
3206                             (MAC_MC_ADDR_START_OFFSET + i);
3207                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3208
3209                         /* Wait till command completes */
3210                         if (wait_for_cmd_complete(sp)) {
3211                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3212                                           dev->name);
3213                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3214                                 return;
3215                         }
3216                 }
3217
3218                 /* Create the new Rx filter list and update the same in H/W. */
3219                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3220                      i++, mclist = mclist->next) {
3221                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3222                                ETH_ALEN);
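                             /*
                              * Pack the 6-byte multicast address into the low
                              * 48 bits of mac_addr, first byte most significant.
                              */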
3223                         for (j = 0; j < ETH_ALEN; j++) {
3224                                 mac_addr |= mclist->dmi_addr[j];
3225                                 mac_addr <<= 8;
3226                         }
3227                         mac_addr >>= 8;
3228                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3229                                &bar0->rmac_addr_data0_mem);
3230                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3231                                 &bar0->rmac_addr_data1_mem);
3232                         val64 = RMAC_ADDR_CMD_MEM_WE |
3233                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3234                             RMAC_ADDR_CMD_MEM_OFFSET
3235                             (i + MAC_MC_ADDR_START_OFFSET);
3236                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3237
3238                         /* Wait till command completes */
3239                         if (wait_for_cmd_complete(sp)) {
3240                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3241                                           dev->name);
3242                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3243                                 return;
3244                         }
3245                 }
3246         }
3247 }
3248
3249 /**
3250  *  s2io_set_mac_addr - Programs the Xframe mac address
3251  *  @dev : pointer to the device structure.
3252  *  @addr: a uchar pointer to the new mac address which is to be set.
3253  *  Description : This procedure will program the Xframe to receive
3254  *  frames with new Mac Address
3255  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3256  *  as defined in errno.h file on failure.
3257  */
3258
3259 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3260 {
3261         nic_t *sp = dev->priv;
3262         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3263         register u64 val64, mac_addr = 0;
3264         int i;
3265
3266         /*
3267          * Set the new MAC address as the new unicast filter and reflect this
3268          * change on the device address registered with the OS. It will be
3269          * at offset 0.
3270          */
3271         for (i = 0; i < ETH_ALEN; i++) {
3272                 mac_addr <<= 8;
3273                 mac_addr |= addr[i];
3274         }
3275
3276         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3277                &bar0->rmac_addr_data0_mem);
3278
3279         val64 =
3280             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3281             RMAC_ADDR_CMD_MEM_OFFSET(0);
3282         writeq(val64, &bar0->rmac_addr_cmd_mem);
3283         /* Wait till command completes */
3284         if (wait_for_cmd_complete(sp)) {
3285                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3286                 return FAILURE;
3287         }
3288
3289         return SUCCESS;
3290 }
3291
3292 /**
3293  * s2io_ethtool_sset - Sets different link parameters.
3294  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3295  * @info: pointer to the structure with parameters given by ethtool to set
3296  * link information.
3297  * Description:
3298  * The function sets different link parameters provided by the user onto
3299  * the NIC.
3300  * Return value:
3301  * 0 on success.
3302 */
3303
3304 static int s2io_ethtool_sset(struct net_device *dev,
3305                              struct ethtool_cmd *info)
3306 {
3307         nic_t *sp = dev->priv;
3308         if ((info->autoneg == AUTONEG_ENABLE) ||
3309             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3310                 return -EINVAL;
3311         else {
3312                 s2io_close(sp->dev);
3313                 s2io_open(sp->dev);
3314         }
3315
3316         return 0;
3317 }
3318
3319 /**
3320  * s2io_ethtool_gset - Return link specific information.
3321  * @sp : private member of the device structure, pointer to the
3322  *      s2io_nic structure.
3323  * @info : pointer to the structure with parameters given by ethtool
3324  * to return link information.
3325  * Description:
3326  * Returns link specific information like speed, duplex etc.. to ethtool.
3327  * Return value :
3328  * return 0 on success.
3329  */
3330
3331 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3332 {
3333         nic_t *sp = dev->priv;
3334         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3335         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3336         info->port = PORT_FIBRE;
3337         /* info->transceiver?? TODO */
3338
3339         if (netif_carrier_ok(sp->dev)) {
3340                 info->speed = 10000;
3341                 info->duplex = DUPLEX_FULL;
3342         } else {
3343                 info->speed = -1;
3344                 info->duplex = -1;
3345         }
3346
3347         info->autoneg = AUTONEG_DISABLE;
3348         return 0;
3349 }
3350
3351 /**
3352  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3353  * @sp : private member of the device structure, which is a pointer to the
3354  * s2io_nic structure.
3355  * @info : pointer to the structure with parameters given by ethtool to
3356  * return driver information.
3357  * Description:
3358  * Returns driver specific information like name, version etc. to ethtool.
3359  * Return value:
3360  *  void
3361  */
3362
3363 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3364                                   struct ethtool_drvinfo *info)
3365 {
3366         nic_t *sp = dev->priv;
3367
3368         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3369         strncpy(info->version, s2io_driver_version,
3370                 sizeof(s2io_driver_version));
3371         strncpy(info->fw_version, "", 32);
3372         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3373         info->regdump_len = XENA_REG_SPACE;
3374         info->eedump_len = XENA_EEPROM_SPACE;
3375         info->testinfo_len = S2IO_TEST_LEN;
3376         info->n_stats = S2IO_STAT_LEN;
3377 }
3378
3379 /**
3380  *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3381  *  @sp: private member of the device structure, which is a pointer to the
3382  *  s2io_nic structure.
3383  *  @regs : pointer to the structure with parameters given by ethtool for
3384  *  dumping the registers.
3385  *  @reg_space: The input argument into which all the registers are dumped.
3386  *  Description:
3387  *  Dumps the entire register space of xFrame NIC into the user given
3388  *  buffer area.
3389  * Return value :
3390  * void .
3391 */
3392
3393 static void s2io_ethtool_gregs(struct net_device *dev,
3394                                struct ethtool_regs *regs, void *space)
3395 {
3396         int i;
3397         u64 reg;
3398         u8 *reg_space = (u8 *) space;
3399         nic_t *sp = dev->priv;
3400
3401         regs->len = XENA_REG_SPACE;
3402         regs->version = sp->pdev->subsystem_device;
3403
3404         for (i = 0; i < regs->len; i += 8) {
3405                 reg = readq(sp->bar0 + i);
3406                 memcpy((reg_space + i), &reg, 8);
3407         }
3408 }
3409
3410 /**
3411  *  s2io_phy_id  - timer function that alternates adapter LED.
3412  *  @data : address of the private member of the device structure, which
3413  *  is a pointer to the s2io_nic structure, provided as an u32.
3414  * Description: This is actually the timer function that toggles the
3415  * adapter LED bit of the adapter control register on each invocation.
3416  * The timer is set for 1/2 a second, hence the NIC LED blinks
3417  * once every second.
3418 */
3419 static void s2io_phy_id(unsigned long data)
3420 {
3421         nic_t *sp = (nic_t *) data;
3422         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3423         u64 val64 = 0;
3424         u16 subid;
3425
3426         subid = sp->pdev->subsystem_device;
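             /*
              * Cards with subsystem id byte >= 0x07 blink the LED through
              * GPIO 0; older cards toggle the LED bit in adapter_control.
              */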
3427         if ((subid & 0xFF) >= 0x07) {
3428                 val64 = readq(&bar0->gpio_control);
3429                 val64 ^= GPIO_CTRL_GPIO_0;
3430                 writeq(val64, &bar0->gpio_control);
3431         } else {
3432                 val64 = readq(&bar0->adapter_control);
3433                 val64 ^= ADAPTER_LED_ON;
3434                 writeq(val64, &bar0->adapter_control);
3435         }
3436
3437         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3438 }
3439
3440 /**
3441  * s2io_ethtool_idnic - To physically identify the nic on the system.
3442  * @sp : private member of the device structure, which is a pointer to the
3443  * s2io_nic structure.
3444  * @id : pointer to the structure with identification parameters given by
3445  * ethtool.
3446  * Description: Used to physically identify the NIC on the system.
3447  * The Link LED will blink for a time specified by the user for
3448  * identification.
3449  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3450  * identification is possible only if its link is up.
3451  * Return value:
3452  * int , returns 0 on success
3453  */
3454
3455 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3456 {
3457         u64 val64 = 0, last_gpio_ctrl_val;
3458         nic_t *sp = dev->priv;
3459         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3460         u16 subid;
3461
3462         subid = sp->pdev->subsystem_device;
3463         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3464         if ((subid & 0xFF) < 0x07) {
3465                 val64 = readq(&bar0->adapter_control);
3466                 if (!(val64 & ADAPTER_CNTL_EN)) {
3467                         printk(KERN_ERR
3468                                "Adapter Link down, cannot blink LED\n");
3469                         return -EFAULT;
3470                 }
3471         }
3472         if (sp->id_timer.function == NULL) {
3473                 init_timer(&sp->id_timer);
3474                 sp->id_timer.function = s2io_phy_id;
3475                 sp->id_timer.data = (unsigned long) sp;
3476         }
3477         mod_timer(&sp->id_timer, jiffies);
3478         if (data)
3479                 msleep_interruptible(data * HZ);
3480         else
3481                 msleep_interruptible(MAX_FLICKER_TIME);
3482         del_timer_sync(&sp->id_timer);
3483
3484         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3485                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3486                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3487         }
3488
3489         return 0;
3490 }
3491
3492 /**
3493  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3494  * @sp : private member of the device structure, which is a pointer to the
3495  *      s2io_nic structure.
3496  * @ep : pointer to the structure with pause parameters given by ethtool.
3497  * Description:
3498  * Returns the Pause frame generation and reception capability of the NIC.
3499  * Return value:
3500  *  void
3501  */
3502 static void s2io_ethtool_getpause_data(struct net_device *dev,
3503                                        struct ethtool_pauseparam *ep)
3504 {
3505         u64 val64;
3506         nic_t *sp = dev->priv;
3507         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3508
3509         val64 = readq(&bar0->rmac_pause_cfg);
3510         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3511                 ep->tx_pause = TRUE;
3512         if (val64 & RMAC_PAUSE_RX_ENABLE)
3513                 ep->rx_pause = TRUE;
3514         ep->autoneg = FALSE;
3515 }
3516
3517 /**
3518  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3519  * @sp : private member of the device structure, which is a pointer to the
3520  *      s2io_nic structure.
3521  * @ep : pointer to the structure with pause parameters given by ethtool.
3522  * Description:
3523  * It can be used to set or reset Pause frame generation or reception
3524  * support of the NIC.
3525  * Return value:
3526  * int, returns 0 on Success
3527  */
3528
3529 static int s2io_ethtool_setpause_data(struct net_device *dev,
3530                                struct ethtool_pauseparam *ep)
3531 {
3532         u64 val64;
3533         nic_t *sp = dev->priv;
3534         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3535
3536         val64 = readq(&bar0->rmac_pause_cfg);
3537         if (ep->tx_pause)
3538                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3539         else
3540                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3541         if (ep->rx_pause)
3542                 val64 |= RMAC_PAUSE_RX_ENABLE;
3543         else
3544                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3545         writeq(val64, &bar0->rmac_pause_cfg);
3546         return 0;
3547 }
3548
3549 /**
3550  * read_eeprom - reads 4 bytes of data from user given offset.
3551  * @sp : private member of the device structure, which is a pointer to the
3552  *      s2io_nic structure.
3553  * @off : offset from which the data is to be read
3554  * @data : Its an output parameter where the data read at the given
3555  *      offset is stored.
3556  * Description:
3557  * Will read 4 bytes of data from the user given offset and return the
3558  * read data.
3559  * NOTE: Will allow reading only the part of the EEPROM visible through the
3560  *   I2C bus.
3561  * Return value:
3562  *  -1 on failure and 0 on success.
3563  */
3564
3565 #define S2IO_DEV_ID             5
3566 static int read_eeprom(nic_t * sp, int off, u32 * data)
3567 {
3568         int ret = -1;
3569         u32 exit_cnt = 0;
3570         u64 val64;
3571         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3572
3573         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3574             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3575             I2C_CONTROL_CNTL_START;
3576         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3577
3578         while (exit_cnt < 5) {
3579                 val64 = readq(&bar0->i2c_control);
3580                 if (I2C_CONTROL_CNTL_END(val64)) {
3581                         *data = I2C_CONTROL_GET_DATA(val64);
3582                         ret = 0;
3583                         break;
3584                 }
3585                 msleep(50);
3586                 exit_cnt++;
3587         }
3588
3589         return ret;
3590 }
3591
3592 /**
3593  *  write_eeprom - actually writes the relevant part of the data value.
3594  *  @sp : private member of the device structure, which is a pointer to the
3595  *       s2io_nic structure.
3596  *  @off : offset at which the data must be written
3597  *  @data : The data that is to be written
3598  *  @cnt : Number of bytes of the data that are actually to be written into
3599  *  the Eeprom. (max of 3)
3600  * Description:
3601  *  Actually writes the relevant part of the data value into the Eeprom
3602  *  through the I2C bus.
3603  * Return value:
3604  *  0 on success, -1 on failure.
3605  */
3606
3607 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3608 {
3609         int exit_cnt = 0, ret = -1;
3610         u64 val64;
3611         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3612
3613         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3614             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3615             I2C_CONTROL_CNTL_START;
3616         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3617
3618         while (exit_cnt < 5) {
3619                 val64 = readq(&bar0->i2c_control);
3620                 if (I2C_CONTROL_CNTL_END(val64)) {
3621                         if (!(val64 & I2C_CONTROL_NACK))
3622                                 ret = 0;
3623                         break;
3624                 }
3625                 msleep(50);
3626                 exit_cnt++;
3627         }
3628
3629         return ret;
3630 }
3631
3632 /**
3633  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3634  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3635  *  @eeprom : pointer to the user level structure provided by ethtool,
3636  *  containing all relevant information.
3637  *  @data_buf : user defined value to be written into Eeprom.
3638  *  Description: Reads the values stored in the Eeprom at given offset
3639  *  for a given length. Stores these values in the input argument data
3640  *  buffer 'data_buf' and returns these to the caller (ethtool.)
3641  *  Return value:
3642  *  int  0 on success
3643  */
3644
3645 static int s2io_ethtool_geeprom(struct net_device *dev,
3646                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3647 {
3648         u32 data, i, valid;
3649         nic_t *sp = dev->priv;
3650
3651         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3652
3653         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3654                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3655
3656         for (i = 0; i < eeprom->len; i += 4) {
3657                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3658                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3659                         return -EFAULT;
3660                 }
3661                 valid = INV(data);
3662                 memcpy((data_buf + i), &valid, 4);
3663         }
3664         return 0;
3665 }
3666
3667 /**
3668  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3669  *  @sp : private member of the device structure, which is a pointer to the
3670  *  s2io_nic structure.
3671  *  @eeprom : pointer to the user level structure provided by ethtool,
3672  *  containing all relevant information.
3673  *  @data_buf : user defined value to be written into Eeprom.
3674  *  Description:
3675  *  Tries to write the user provided value in the Eeprom, at the offset
3676  *  given by the user.
3677  *  Return value:
3678  *  0 on success, -EFAULT on failure.
3679  */
3680
3681 static int s2io_ethtool_seeprom(struct net_device *dev,
3682                                 struct ethtool_eeprom *eeprom,
3683                                 u8 * data_buf)
3684 {
3685         int len = eeprom->len, cnt = 0;
3686         u32 valid = 0, data;
3687         nic_t *sp = dev->priv;
3688
3689         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3690                 DBG_PRINT(ERR_DBG,
3691                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3692                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3693                           eeprom->magic);
3694                 return -EFAULT;
3695         }
3696
3697         while (len) {
3698                 data = (u32) data_buf[cnt] & 0x000000FF;
3699                 if (data) {
3700                         valid = (u32) (data << 24);
3701                 } else
3702                         valid = data;
3703
3704                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3705                         DBG_PRINT(ERR_DBG,
3706                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3707                         DBG_PRINT(ERR_DBG,
3708                                   "write into the specified offset\n");
3709                         return -EFAULT;
3710                 }
3711                 cnt++;
3712                 len--;
3713         }
3714
3715         return 0;
3716 }
3717
3718 /**
3719  * s2io_register_test - reads and writes into all clock domains.
3720  * @sp : private member of the device structure, which is a pointer to the
3721  * s2io_nic structure.
3722  * @data : variable that returns the result of each of the tests conducted
3723  * by the driver.
3724  * Description:
3725  * Read and write into all clock domains. The NIC has 3 clock domains,
3726  * see that registers in all the three regions are accessible.
3727  * Return value:
3728  * 0 on success.
3729  */
3730
3731 static int s2io_register_test(nic_t * sp, uint64_t * data)
3732 {
3733         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3734         u64 val64 = 0;
3735         int fail = 0;
3736
3737         val64 = readq(&bar0->pif_rd_swapper_fb);
3738         if (val64 != 0x123456789abcdefULL) {
3739                 fail = 1;
3740                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3741         }
3742
3743         val64 = readq(&bar0->rmac_pause_cfg);
3744         if (val64 != 0xc000ffff00000000ULL) {
3745                 fail = 1;
3746                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3747         }
3748
3749         val64 = readq(&bar0->rx_queue_cfg);
3750         if (val64 != 0x0808080808080808ULL) {
3751                 fail = 1;
3752                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3753         }
3754
3755         val64 = readq(&bar0->xgxs_efifo_cfg);
3756         if (val64 != 0x000000001923141EULL) {
3757                 fail = 1;
3758                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3759         }
3760
3761         val64 = 0x5A5A5A5A5A5A5A5AULL;
3762         writeq(val64, &bar0->xmsi_data);
3763         val64 = readq(&bar0->xmsi_data);
3764         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3765                 fail = 1;
3766                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3767         }
3768
3769         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3770         writeq(val64, &bar0->xmsi_data);
3771         val64 = readq(&bar0->xmsi_data);
3772         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3773                 fail = 1;
3774                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3775         }
3776
3777         *data = fail;
3778         return 0;
3779 }
3780
3781 /**
3782  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3783  * @sp : private member of the device structure, which is a pointer to the
3784  * s2io_nic structure.
3785  * @data:variable that returns the result of each of the test conducted by
3786  * the driver.
3787  * Description:
3788  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3789  * register.
3790  * Return value:
3791  * 0 on success.
3792  */
3793
3794 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3795 {
3796         int fail = 0;
3797         u32 ret_data;
3798
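             /*
              * Writes to offsets 0x000, 0x07C, 0x080, 0x0FC, 0x100 and 0x4EC are
              * expected to be rejected (a successful write is a failure), while
              * 0x4F0 and 0x7FC must accept a write and read back the same value.
              */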
3799         /* Test Write Error at offset 0 */
3800         if (!write_eeprom(sp, 0, 0, 3))
3801                 fail = 1;
3802
3803         /* Test Write at offset 4f0 */
3804         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3805                 fail = 1;
3806         if (read_eeprom(sp, 0x4F0, &ret_data))
3807                 fail = 1;
3808
3809         if (ret_data != 0x01234567)
3810                 fail = 1;
3811
3812         /* Reset the EEPROM data back to FFFF */
3813         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3814
3815         /* Test Write Request Error at offset 0x7c */
3816         if (!write_eeprom(sp, 0x07C, 0, 3))
3817                 fail = 1;
3818
3819         /* Test Write Request at offset 0x7fc */
3820         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3821                 fail = 1;
3822         if (read_eeprom(sp, 0x7FC, &ret_data))
3823                 fail = 1;
3824
3825         if (ret_data != 0x01234567)
3826                 fail = 1;
3827
3828         /* Reset the EEPROM data back to FFFF */
3829         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3830
3831         /* Test Write Error at offset 0x80 */
3832         if (!write_eeprom(sp, 0x080, 0, 3))
3833                 fail = 1;
3834
3835         /* Test Write Error at offset 0xfc */
3836         if (!write_eeprom(sp, 0x0FC, 0, 3))
3837                 fail = 1;
3838
3839         /* Test Write Error at offset 0x100 */
3840         if (!write_eeprom(sp, 0x100, 0, 3))
3841                 fail = 1;
3842
3843         /* Test Write Error at offset 4ec */
3844         if (!write_eeprom(sp, 0x4EC, 0, 3))
3845                 fail = 1;
3846
3847         *data = fail;
3848         return 0;
3849 }
3850
3851 /**
3852  * s2io_bist_test - invokes the MemBist test of the card .
3853  * @sp : private member of the device structure, which is a pointer to the
3854  * s2io_nic structure.
3855  * @data:variable that returns the result of each of the test conducted by
3856  * the driver.
3857  * Description:
3858  * This invokes the MemBist test of the card. We give around
3859  * 2 secs time for the Test to complete. If it's still not complete
3860  * within this period, we consider that the test failed.
3861  * Return value:
3862  * 0 on success and -1 on failure.
3863  */
3864
3865 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3866 {
3867         u8 bist = 0;
3868         int cnt = 0, ret = -1;
3869
3870         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3871         bist |= PCI_BIST_START;
3872         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3873
3874         while (cnt < 20) {
3875                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3876                 if (!(bist & PCI_BIST_START)) {
3877                         *data = (bist & PCI_BIST_CODE_MASK);
3878                         ret = 0;
3879                         break;
3880                 }
3881                 msleep(100);
3882                 cnt++;
3883         }
3884
3885         return ret;
3886 }
3887
3888 /**
3889  * s2io_link_test - verifies the link state of the nic
3890  * @sp : private member of the device structure, which is a pointer to the
3891  * s2io_nic structure.
3892  * @data: variable that returns the result of each of the test conducted by
3893  * the driver.
3894  * Description:
3895  * The function verifies the link state of the NIC and updates the input
3896  * argument 'data' appropriately.
3897  * Return value:
3898  * 0 on success.
3899  */
3900
3901 static int s2io_link_test(nic_t * sp, uint64_t * data)
3902 {
3903         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3904         u64 val64;
3905
3906         val64 = readq(&bar0->adapter_status);
3907         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3908                 *data = 1;
3909
3910         return 0;
3911 }
3912
3913 /**
3914  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3915  * @sp : private member of the device structure, which is a pointer to the
3916  * s2io_nic structure.
3917  * @data : variable that returns the result of each of the test
3918  * conducted by the driver.
3919  * Description:
3920  *  This is one of the offline test that tests the read and write
3921  *  access to the RldRam chip on the NIC.
3922  * Return value:
3923  *  0 on success.
3924  */
3925
3926 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3927 {
3928         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3929         u64 val64;
3930         int cnt, iteration = 0, test_pass = 0;
3931
3932         val64 = readq(&bar0->adapter_control);
3933         val64 &= ~ADAPTER_ECC_EN;
3934         writeq(val64, &bar0->adapter_control);
3935
3936         val64 = readq(&bar0->mc_rldram_test_ctrl);
3937         val64 |= MC_RLDRAM_TEST_MODE;
3938         writeq(val64, &bar0->mc_rldram_test_ctrl);
3939
3940         val64 = readq(&bar0->mc_rldram_mrs);
3941         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3942         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3943
3944         val64 |= MC_RLDRAM_MRS_ENABLE;
3945         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3946
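             /*
              * Two passes: the second pass inverts the upper 48 bits of each
              * test pattern before repeating the write/read-back cycle.
              */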
3947         while (iteration < 2) {
3948                 val64 = 0x55555555aaaa0000ULL;
3949                 if (iteration == 1) {
3950                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3951                 }
3952                 writeq(val64, &bar0->mc_rldram_test_d0);
3953
3954                 val64 = 0xaaaa5a5555550000ULL;
3955                 if (iteration == 1) {
3956                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3957                 }
3958                 writeq(val64, &bar0->mc_rldram_test_d1);
3959
3960                 val64 = 0x55aaaaaaaa5a0000ULL;
3961                 if (iteration == 1) {
3962                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3963                 }
3964                 writeq(val64, &bar0->mc_rldram_test_d2);
3965
3966                 val64 = (u64) (0x0000003fffff0000ULL);
3967                 writeq(val64, &bar0->mc_rldram_test_add);
3968
3969
3970                 val64 = MC_RLDRAM_TEST_MODE;
3971                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3972
3973                 val64 |=
3974                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3975                     MC_RLDRAM_TEST_GO;
3976                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3977
3978                 for (cnt = 0; cnt < 5; cnt++) {
3979                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3980                         if (val64 & MC_RLDRAM_TEST_DONE)
3981                                 break;
3982                         msleep(200);
3983                 }
3984
3985                 if (cnt == 5)
3986                         break;
3987
3988                 val64 = MC_RLDRAM_TEST_MODE;
3989                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3990
3991                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3992                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3993
3994                 for (cnt = 0; cnt < 5; cnt++) {
3995                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3996                         if (val64 & MC_RLDRAM_TEST_DONE)
3997                                 break;
3998                         msleep(500);
3999                 }
4000
4001                 if (cnt == 5)
4002                         break;
4003
4004                 val64 = readq(&bar0->mc_rldram_test_ctrl);
4005                 if (val64 & MC_RLDRAM_TEST_PASS)
4006                         test_pass = 1;
4007
4008                 iteration++;
4009         }
4010
4011         if (!test_pass)
4012                 *data = 1;
4013         else
4014                 *data = 0;
4015
4016         return 0;
4017 }
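
/*
 * A short summary of the loop above (inferred from the register accesses,
 * not from a hardware specification): each of the two iterations programs
 * three 64-bit patterns (the second iteration complements the upper 48 bits
 * of each), triggers a write pass and then a read pass through
 * mc_rldram_test_ctrl while polling MC_RLDRAM_TEST_DONE, and records
 * success when MC_RLDRAM_TEST_PASS is set afterwards.
 */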
4018
4019 /**
4020  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4021  *  @sp : private member of the device structure, which is a pointer to the
4022  *  s2io_nic structure.
4023  *  @ethtest : pointer to an ethtool command specific structure that will be
4024  *  returned to the user.
4025  *  @data : variable that returns the result of each of the test
4026  * conducted by the driver.
4027  * Description:
4028  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4029  *  the health of the card.
4030  * Return value:
4031  *  void
4032  */
4033
4034 static void s2io_ethtool_test(struct net_device *dev,
4035                               struct ethtool_test *ethtest,
4036                               uint64_t * data)
4037 {
4038         nic_t *sp = dev->priv;
4039         int orig_state = netif_running(sp->dev);
4040
4041         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4042                 /* Offline Tests. */
4043                 if (orig_state)
4044                         s2io_close(sp->dev);
4045
4046                 if (s2io_register_test(sp, &data[0]))
4047                         ethtest->flags |= ETH_TEST_FL_FAILED;
4048
4049                 s2io_reset(sp);
4050
4051                 if (s2io_rldram_test(sp, &data[3]))
4052                         ethtest->flags |= ETH_TEST_FL_FAILED;
4053
4054                 s2io_reset(sp);
4055
4056                 if (s2io_eeprom_test(sp, &data[1]))
4057                         ethtest->flags |= ETH_TEST_FL_FAILED;
4058
4059                 if (s2io_bist_test(sp, &data[4]))
4060                         ethtest->flags |= ETH_TEST_FL_FAILED;
4061
4062                 if (orig_state)
4063                         s2io_open(sp->dev);
4064
4065                 data[2] = 0;
4066         } else {
4067                 /* Online Tests. */
4068                 if (!orig_state) {
4069                         DBG_PRINT(ERR_DBG,
4070                                   "%s: is not up, cannot run test\n",
4071                                   dev->name);
4072                         data[0] = -1;
4073                         data[1] = -1;
4074                         data[2] = -1;
4075                         data[3] = -1;
4076                         data[4] = -1;
4077                 }
4078
4079                 if (s2io_link_test(sp, &data[2]))
4080                         ethtest->flags |= ETH_TEST_FL_FAILED;
4081
4082                 data[0] = 0;
4083                 data[1] = 0;
4084                 data[3] = 0;
4085                 data[4] = 0;
4086         }
4087 }
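
/*
 * Note on the self-test result layout (derived from the calls above, not
 * from any separate specification): data[0] holds the register test result,
 * data[1] the EEPROM test, data[2] the link test, data[3] the RldRAM test
 * and data[4] the BIST result; a non-zero entry marks a failure.  From
 * userspace these tests are typically triggered with something like
 * "ethtool -t ethX offline" (illustrative invocation).
 */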
4088
4089 static void s2io_get_ethtool_stats(struct net_device *dev,
4090                                    struct ethtool_stats *estats,
4091                                    u64 * tmp_stats)
4092 {
4093         int i = 0;
4094         nic_t *sp = dev->priv;
4095         StatInfo_t *stat_info = sp->mac_control.stats_info;
4096
4097         s2io_updt_stats(sp);
4098         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4099         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4100         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4101         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4102         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4103         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4104         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4105         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4106         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4107         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4108         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4109         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4110         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4111         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4112         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4113         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4114         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4115         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4116         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4117         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4118         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4119         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4120         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4121         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4122         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4123         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4124         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4125         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4126         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4127         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4128         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4129         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4130         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4131         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4132         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4133         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4134         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4135         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4136         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4137         tmp_stats[i++] = 0;
4138         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4139         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4140 }
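
/*
 * The order of the counters copied into tmp_stats[] above is assumed to
 * match, one for one, the ethtool_stats_keys[] strings handed out by
 * s2io_ethtool_get_strings() for ETH_SS_STATS; a new counter therefore has
 * to be appended in both places to keep "ethtool -S" output consistent.
 */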
4141
4142 int s2io_ethtool_get_regs_len(struct net_device *dev)
4143 {
4144         return (XENA_REG_SPACE);
4145 }
4146
4147
4148 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4149 {
4150         nic_t *sp = dev->priv;
4151
4152         return (sp->rx_csum);
4153 }
4154 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4155 {
4156         nic_t *sp = dev->priv;
4157
4158         if (data)
4159                 sp->rx_csum = 1;
4160         else
4161                 sp->rx_csum = 0;
4162
4163         return 0;
4164 }
4165 int s2io_get_eeprom_len(struct net_device *dev)
4166 {
4167         return (XENA_EEPROM_SPACE);
4168 }
4169
4170 int s2io_ethtool_self_test_count(struct net_device *dev)
4171 {
4172         return (S2IO_TEST_LEN);
4173 }
4174 void s2io_ethtool_get_strings(struct net_device *dev,
4175                               u32 stringset, u8 * data)
4176 {
4177         switch (stringset) {
4178         case ETH_SS_TEST:
4179                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4180                 break;
4181         case ETH_SS_STATS:
4182                 memcpy(data, &ethtool_stats_keys,
4183                        sizeof(ethtool_stats_keys));
4184         }
4185 }
4186 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4187 {
4188         return (S2IO_STAT_LEN);
4189 }
4190
4191 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4192 {
4193         if (data)
4194                 dev->features |= NETIF_F_IP_CSUM;
4195         else
4196                 dev->features &= ~NETIF_F_IP_CSUM;
4197
4198         return 0;
4199 }
4200
4201
4202 static struct ethtool_ops netdev_ethtool_ops = {
4203         .get_settings = s2io_ethtool_gset,
4204         .set_settings = s2io_ethtool_sset,
4205         .get_drvinfo = s2io_ethtool_gdrvinfo,
4206         .get_regs_len = s2io_ethtool_get_regs_len,
4207         .get_regs = s2io_ethtool_gregs,
4208         .get_link = ethtool_op_get_link,
4209         .get_eeprom_len = s2io_get_eeprom_len,
4210         .get_eeprom = s2io_ethtool_geeprom,
4211         .set_eeprom = s2io_ethtool_seeprom,
4212         .get_pauseparam = s2io_ethtool_getpause_data,
4213         .set_pauseparam = s2io_ethtool_setpause_data,
4214         .get_rx_csum = s2io_ethtool_get_rx_csum,
4215         .set_rx_csum = s2io_ethtool_set_rx_csum,
4216         .get_tx_csum = ethtool_op_get_tx_csum,
4217         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4218         .get_sg = ethtool_op_get_sg,
4219         .set_sg = ethtool_op_set_sg,
4220 #ifdef NETIF_F_TSO
4221         .get_tso = ethtool_op_get_tso,
4222         .set_tso = ethtool_op_set_tso,
4223 #endif
4224         .self_test_count = s2io_ethtool_self_test_count,
4225         .self_test = s2io_ethtool_test,
4226         .get_strings = s2io_ethtool_get_strings,
4227         .phys_id = s2io_ethtool_idnic,
4228         .get_stats_count = s2io_ethtool_get_stats_count,
4229         .get_ethtool_stats = s2io_get_ethtool_stats
4230 };
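
/*
 * These operations are attached to the net device later in s2io_init_nic()
 * through SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops), which is what makes
 * them reachable from the ethtool ioctl path.
 */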
4231
4232 /**
4233  *  s2io_ioctl - Entry point for the Ioctl
4234  *  @dev :  Device pointer.
4235  *  @rq :  An IOCTL specific structure, that can contain a pointer to
4236  *  a proprietary structure used to pass information to the driver.
4237  *  @cmd :  This is used to distinguish between the different commands that
4238  *  can be passed to the IOCTL functions.
4239  *  Description:
4240  *  Currently there is no special functionality supported in IOCTL, hence
4241  *  the function always returns -EOPNOTSUPP.
4242  */
4243
4244 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4245 {
4246         return -EOPNOTSUPP;
4247 }
4248
4249 /**
4250  *  s2io_change_mtu - entry point to change MTU size for the device.
4251  *   @dev : device pointer.
4252  *   @new_mtu : the new MTU size for the device.
4253  *   Description: A driver entry point to change MTU size for the device.
4254  *   Before changing the MTU the device must be stopped.
4255  *  Return value:
4256  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4257  *   file on failure.
4258  */
4259
4260 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4261 {
4262         nic_t *sp = dev->priv;
4263
4264         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4265                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4266                           dev->name);
4267                 return -EPERM;
4268         }
4269
4270         dev->mtu = new_mtu;
4271         if (netif_running(dev)) {
4272                 s2io_card_down(sp);
4273                 netif_stop_queue(dev);
4274                 if (s2io_card_up(sp)) {
4275                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4276                                   __FUNCTION__);
4277                 }
4278                 if (netif_queue_stopped(dev))
4279                         netif_wake_queue(dev);
4280         } else { /* Device is down */
4281                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4282                 u64 val64 = new_mtu;
4283
4284                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4285         }
4286
4287         return 0;
4288 }
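
/*
 * Usage note (illustrative, not part of the driver): the routine above is
 * reached through dev->change_mtu whenever userspace issues e.g.
 * "ip link set ethX mtu 9000" or "ifconfig ethX mtu 9000"; values outside
 * the MIN_MTU..S2IO_JUMBO_SIZE window are rejected before the device is
 * reconfigured.
 */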
4289
4290 /**
4291  *  s2io_tasklet - Bottom half of the ISR.
4292  *  @dev_addr : address of the net device structure cast to an unsigned long.
4293  *  Description:
4294  *  This is the tasklet or the bottom half of the ISR. This is
4295  *  an extension of the ISR which is scheduled by the scheduler to be run
4296  *  when the load on the CPU is low. All low priority tasks of the ISR can
4297  *  be pushed into the tasklet. For now the tasklet is used only to
4298  *  replenish the Rx buffers in the Rx buffer descriptors.
4299  *  Return value:
4300  *  void.
4301  */
4302
4303 static void s2io_tasklet(unsigned long dev_addr)
4304 {
4305         struct net_device *dev = (struct net_device *) dev_addr;
4306         nic_t *sp = dev->priv;
4307         int i, ret;
4308         mac_info_t *mac_control;
4309         struct config_param *config;
4310
4311         mac_control = &sp->mac_control;
4312         config = &sp->config;
4313
4314         if (!TASKLET_IN_USE) {
4315                 for (i = 0; i < config->rx_ring_num; i++) {
4316                         ret = fill_rx_buffers(sp, i);
4317                         if (ret == -ENOMEM) {
4318                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4319                                           dev->name);
4320                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4321                                 break;
4322                         } else if (ret == -EFILL) {
4323                                 DBG_PRINT(ERR_DBG,
4324                                           "%s: Rx Ring %d is full\n",
4325                                           dev->name, i);
4326                                 break;
4327                         }
4328                 }
4329                 clear_bit(0, (&sp->tasklet_status));
4330         }
4331 }
4332
4333 /**
4334  * s2io_set_link - Set the Link status
4335  * @data: long pointer to device private structure
4336  * Description: Sets the link status for the adapter
4337  */
4338
4339 static void s2io_set_link(unsigned long data)
4340 {
4341         nic_t *nic = (nic_t *) data;
4342         struct net_device *dev = nic->dev;
4343         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4344         register u64 val64;
4345         u16 subid;
4346
4347         if (test_and_set_bit(0, &(nic->link_state))) {
4348                 /* The card is being reset, no point doing anything */
4349                 return;
4350         }
4351
4352         subid = nic->pdev->subsystem_device;
4353         /*
4354          * Allow a small delay for the NIC's self initiated
4355          * cleanup to complete.
4356          */
4357         msleep(100);
4358
4359         val64 = readq(&bar0->adapter_status);
4360         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4361                 if (LINK_IS_UP(val64)) {
4362                         val64 = readq(&bar0->adapter_control);
4363                         val64 |= ADAPTER_CNTL_EN;
4364                         writeq(val64, &bar0->adapter_control);
4365                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4366                                 val64 = readq(&bar0->gpio_control);
4367                                 val64 |= GPIO_CTRL_GPIO_0;
4368                                 writeq(val64, &bar0->gpio_control);
4369                                 val64 = readq(&bar0->gpio_control);
4370                         } else {
4371                                 val64 |= ADAPTER_LED_ON;
4372                                 writeq(val64, &bar0->adapter_control);
4373                         }
4374                         val64 = readq(&bar0->adapter_status);
4375                         if (!LINK_IS_UP(val64)) {
4376                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4377                                 DBG_PRINT(ERR_DBG, " Link down ");
4378                                 DBG_PRINT(ERR_DBG, "after ");
4379                                 DBG_PRINT(ERR_DBG, "enabling ");
4380                                 DBG_PRINT(ERR_DBG, "device\n");
4381                         }
4382                         if (nic->device_enabled_once == FALSE) {
4383                                 nic->device_enabled_once = TRUE;
4384                         }
4385                         s2io_link(nic, LINK_UP);
4386                 } else {
4387                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4388                                 val64 = readq(&bar0->gpio_control);
4389                                 val64 &= ~GPIO_CTRL_GPIO_0;
4390                                 writeq(val64, &bar0->gpio_control);
4391                                 val64 = readq(&bar0->gpio_control);
4392                         }
4393                         s2io_link(nic, LINK_DOWN);
4394                 }
4395         } else {                /* NIC is not Quiescent. */
4396                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4397                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4398                 netif_stop_queue(dev);
4399         }
4400         clear_bit(0, &(nic->link_state));
4401 }
4402
4403 static void s2io_card_down(nic_t * sp)
4404 {
4405         int cnt = 0;
4406         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4407         unsigned long flags;
4408         register u64 val64 = 0;
4409
4410         del_timer_sync(&sp->alarm_timer);
4411         /* If s2io_set_link task is executing, wait till it completes. */
4412         while (test_and_set_bit(0, &(sp->link_state))) {
4413                 msleep(50);
4414         }
4415         atomic_set(&sp->card_state, CARD_DOWN);
4416
4417         /* disable Tx and Rx traffic on the NIC */
4418         stop_nic(sp);
4419
4420         /* Kill tasklet. */
4421         tasklet_kill(&sp->task);
4422
4423         /* Check if the device is Quiescent and then Reset the NIC */
4424         do {
4425                 val64 = readq(&bar0->adapter_status);
4426                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4427                         break;
4428                 }
4429
4430                 msleep(50);
4431                 cnt++;
4432                 if (cnt == 10) {
4433                         DBG_PRINT(ERR_DBG,
4434                                   "s2io_close:Device not Quiescent ");
4435                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4436                                   (unsigned long long) val64);
4437                         break;
4438                 }
4439         } while (1);
4440         s2io_reset(sp);
4441
4442         /* Waiting till all Interrupt handlers are complete */
4443         cnt = 0;
4444         do {
4445                 msleep(10);
4446                 if (!atomic_read(&sp->isr_cnt))
4447                         break;
4448                 cnt++;
4449         } while(cnt < 5);
4450
4451         spin_lock_irqsave(&sp->tx_lock, flags);
4452         /* Free all Tx buffers */
4453         free_tx_buffers(sp);
4454         spin_unlock_irqrestore(&sp->tx_lock, flags);
4455
4456         /* Free all Rx buffers */
4457         spin_lock_irqsave(&sp->rx_lock, flags);
4458         free_rx_buffers(sp);
4459         spin_unlock_irqrestore(&sp->rx_lock, flags);
4460
4461         clear_bit(0, &(sp->link_state));
4462 }
4463
4464 static int s2io_card_up(nic_t * sp)
4465 {
4466         int i, ret;
4467         mac_info_t *mac_control;
4468         struct config_param *config;
4469         struct net_device *dev = (struct net_device *) sp->dev;
4470
4471         /* Initialize the H/W I/O registers */
4472         if (init_nic(sp) != 0) {
4473                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4474                           dev->name);
4475                 return -ENODEV;
4476         }
4477
4478         /*
4479          * Initializing the Rx buffers. For now we are considering only 1
4480          * Rx ring and initializing buffers into 30 Rx blocks
4481          */
4482         mac_control = &sp->mac_control;
4483         config = &sp->config;
4484
4485         for (i = 0; i < config->rx_ring_num; i++) {
4486                 if ((ret = fill_rx_buffers(sp, i))) {
4487                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4488                                   dev->name);
4489                         s2io_reset(sp);
4490                         free_rx_buffers(sp);
4491                         return -ENOMEM;
4492                 }
4493                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4494                           atomic_read(&sp->rx_bufs_left[i]));
4495         }
4496
4497         /* Setting its receive mode */
4498         s2io_set_multicast(dev);
4499
4500         /* Enable tasklet for the device */
4501         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4502
4503         /* Enable Rx Traffic and interrupts on the NIC */
4504         if (start_nic(sp)) {
4505                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4506                 tasklet_kill(&sp->task);
4507                 s2io_reset(sp);
4508                 free_irq(dev->irq, dev);
4509                 free_rx_buffers(sp);
4510                 return -ENODEV;
4511         }
4512
4513         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4514
4515         atomic_set(&sp->card_state, CARD_UP);
4516         return 0;
4517 }
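
/*
 * s2io_card_down()/s2io_card_up() form the reinitialization pair used
 * elsewhere in this file: s2io_change_mtu() calls them to apply a new MTU
 * on a running interface, and s2io_restart_nic() calls them from the Tx
 * watchdog path to recover a wedged adapter.
 */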
4518
4519 /**
4520  * s2io_restart_nic - Resets the NIC.
4521  * @data : long pointer to the device private structure
4522  * Description:
4523  * This function is scheduled to be run by the s2io_tx_watchdog
4524  * function after 0.5 secs to reset the NIC. The idea is to reduce
4525  * the run time of the watch dog routine which is run holding a
4526  * spin lock.
4527  */
4528
4529 static void s2io_restart_nic(unsigned long data)
4530 {
4531         struct net_device *dev = (struct net_device *) data;
4532         nic_t *sp = dev->priv;
4533
4534         s2io_card_down(sp);
4535         if (s2io_card_up(sp)) {
4536                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4537                           dev->name);
4538         }
4539         netif_wake_queue(dev);
4540         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4541                   dev->name);
4542
4543 }
4544
4545 /**
4546  *  s2io_tx_watchdog - Watchdog for transmit side.
4547  *  @dev : Pointer to net device structure
4548  *  Description:
4549  *  This function is triggered if the Tx Queue is stopped
4550  *  for a pre-defined amount of time when the Interface is still up.
4551  *  If the Interface is jammed in such a situation, the hardware is
4552  *  reset (by s2io_card_down) and restarted again (by s2io_card_up) to
4553  *  overcome any problem that might have been caused in the hardware.
4554  *  Return value:
4555  *  void
4556  */
4557
4558 static void s2io_tx_watchdog(struct net_device *dev)
4559 {
4560         nic_t *sp = dev->priv;
4561
4562         if (netif_carrier_ok(dev)) {
4563                 schedule_work(&sp->rst_timer_task);
4564         }
4565 }
4566
4567 /**
4568  *   rx_osm_handler - To perform some OS related operations on SKB.
4569  *   @sp: private member of the device structure,pointer to s2io_nic structure.
4570  *   @skb : the socket buffer pointer.
4571  *   @len : length of the packet
4572  *   @cksum : FCS checksum of the frame.
4573  *   @ring_no : the ring from which this RxD was extracted.
4574  *   Description:
4575  *   This function is called by the Rx interrupt service routine to perform
4576  *   some OS related operations on the SKB before passing it to the upper
4577  *   layers. It mainly checks if the checksum is OK, if so adds it to the
4578  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
4579  *   to the upper layer. If the checksum is wrong, it increments the Rx
4580  *   packet error count, frees the SKB and returns error.
4581  *   Return value:
4582  *   SUCCESS on success and -1 on failure.
4583  */
4584 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4585 {
4586         nic_t *sp = ring_data->nic;
4587         struct net_device *dev = (struct net_device *) sp->dev;
4588         struct sk_buff *skb = (struct sk_buff *)
4589                 ((unsigned long) rxdp->Host_Control);
4590         int ring_no = ring_data->ring_no;
4591         u16 l3_csum, l4_csum;
4592 #ifdef CONFIG_2BUFF_MODE
4593         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4594         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4595         int get_block = ring_data->rx_curr_get_info.block_index;
4596         int get_off = ring_data->rx_curr_get_info.offset;
4597         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4598         unsigned char *buff;
4599 #else
4600         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4601 #endif
4602         skb->dev = dev;
4603         if (rxdp->Control_1 & RXD_T_CODE) {
4604                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4605                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4606                           dev->name, err);
4607                 dev_kfree_skb(skb);
4608                 sp->stats.rx_crc_errors++;
4609                 atomic_dec(&sp->rx_bufs_left[ring_no]);
4610                 rxdp->Host_Control = 0;
4611                 return 0;
4612         }
4613
4614         /* Updating statistics */
4615         rxdp->Host_Control = 0;
4616         sp->rx_pkt_count++;
4617         sp->stats.rx_packets++;
4618 #ifndef CONFIG_2BUFF_MODE
4619         sp->stats.rx_bytes += len;
4620 #else
4621         sp->stats.rx_bytes += buf0_len + buf2_len;
4622 #endif
4623
4624 #ifndef CONFIG_2BUFF_MODE
4625         skb_put(skb, len);
4626 #else
4627         buff = skb_push(skb, buf0_len);
4628         memcpy(buff, ba->ba_0, buf0_len);
4629         skb_put(skb, buf2_len);
4630 #endif
4631
4632         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4633             (sp->rx_csum)) {
4634                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4635                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4636                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4637                         /*
4638                          * NIC verifies if the Checksum of the received
4639                          * frame is Ok or not and accordingly returns
4640                          * a flag in the RxD.
4641                          */
4642                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4643                 } else {
4644                         /*
4645                          * Packet with erroneous checksum, let the
4646                          * upper layers deal with it.
4647                          */
4648                         skb->ip_summed = CHECKSUM_NONE;
4649                 }
4650         } else {
4651                 skb->ip_summed = CHECKSUM_NONE;
4652         }
4653
4654         skb->protocol = eth_type_trans(skb, dev);
4655 #ifdef CONFIG_S2IO_NAPI
4656         netif_receive_skb(skb);
4657 #else
4658         netif_rx(skb);
4659 #endif
4660         dev->last_rx = jiffies;
4661         atomic_dec(&sp->rx_bufs_left[ring_no]);
4662         return SUCCESS;
4663 }
4664
4665 /**
4666  *  s2io_link - stops/starts the Tx queue.
4667  *  @sp : private member of the device structure, which is a pointer to the
4668  *  s2io_nic structure.
4669  *  @link : indicates whether link is UP/DOWN.
4670  *  Description:
4671  *  This function stops/starts the Tx queue depending on whether the link
4672  *  status of the NIC is down or up. This is called by the Alarm
4673  *  interrupt handler whenever a link change interrupt comes up.
4674  *  Return value:
4675  *  void.
4676  */
4677
4678 void s2io_link(nic_t * sp, int link)
4679 {
4680         struct net_device *dev = (struct net_device *) sp->dev;
4681
4682         if (link != sp->last_link_state) {
4683                 if (link == LINK_DOWN) {
4684                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4685                         netif_carrier_off(dev);
4686                 } else {
4687                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4688                         netif_carrier_on(dev);
4689                 }
4690         }
4691         sp->last_link_state = link;
4692 }
4693
4694 /**
4695  *  get_xena_rev_id - to identify revision ID of xena.
4696  *  @pdev : PCI Dev structure
4697  *  Description:
4698  *  Function to identify the Revision ID of xena.
4699  *  Return value:
4700  *  returns the revision ID of the device.
4701  */
4702
4703 int get_xena_rev_id(struct pci_dev *pdev)
4704 {
4705         u8 id = 0;
4706         int ret;
4707         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4708         return id;
4709 }
4710
4711 /**
4712  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4713  *  @sp : private member of the device structure, which is a pointer to the
4714  *  s2io_nic structure.
4715  *  Description:
4716  *  This function initializes a few of the PCI and PCI-X configuration registers
4717  *  with recommended values.
4718  *  Return value:
4719  *  void
4720  */
4721
4722 static void s2io_init_pci(nic_t * sp)
4723 {
4724         u16 pci_cmd = 0, pcix_cmd = 0;
4725
4726         /* Enable Data Parity Error Recovery in PCI-X command register. */
4727         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4728                              &(pcix_cmd));
4729         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4730                               (pcix_cmd | 1));
4731         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4732                              &(pcix_cmd));
4733
4734         /* Set the PErr Response bit in PCI command register. */
4735         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4736         pci_write_config_word(sp->pdev, PCI_COMMAND,
4737                               (pci_cmd | PCI_COMMAND_PARITY));
4738         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4739
4740         /* Forcibly disabling relaxed ordering capability of the card. */
4741         pcix_cmd &= 0xfffd;
4742         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4743                               pcix_cmd);
4744         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4745                              &(pcix_cmd));
4746 }
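
/*
 * For reference (per the PCI-X addendum, stated here as background rather
 * than anything this driver defines): in the PCI-X command register bit 0
 * is the Data Parity Error Recovery Enable bit set above with "| 1", and
 * bit 1 is the Enable Relaxed Ordering bit cleared by the "& 0xfffd" mask.
 */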
4747
4748 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4749 MODULE_LICENSE("GPL");
4750 module_param(tx_fifo_num, int, 0);
4751 module_param(rx_ring_num, int, 0);
4752 module_param_array(tx_fifo_len, uint, NULL, 0);
4753 module_param_array(rx_ring_sz, uint, NULL, 0);
4754 module_param_array(rts_frm_len, uint, NULL, 0);
4755 module_param(use_continuous_tx_intrs, int, 1);
4756 module_param(rmac_pause_time, int, 0);
4757 module_param(mc_pause_threshold_q0q3, int, 0);
4758 module_param(mc_pause_threshold_q4q7, int, 0);
4759 module_param(shared_splits, int, 0);
4760 module_param(tmac_util_period, int, 0);
4761 module_param(rmac_util_period, int, 0);
4762 #ifndef CONFIG_S2IO_NAPI
4763 module_param(indicate_max_pkts, int, 0);
4764 #endif
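
/*
 * Example module load line (values are illustrative only):
 *
 *     modprobe s2io rx_ring_num=2 tx_fifo_num=1 rx_ring_sz=512,512
 *
 * Parameters left unspecified fall back to the defaults assigned in
 * s2io_init_nic() below.
 */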
4765
4766 /**
4767  *  s2io_init_nic - Initialization of the adapter .
4768  *  @pdev : structure containing the PCI related information of the device.
4769  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4770  *  Description:
4771  *  The function initializes an adapter identified by the pci_dev structure.
4772  *  All OS related initialization including memory and device structure and
4773  *  initialization of the device private variable is done. Also the swapper
4774  *  control register is initialized to enable read and write into the I/O
4775  *  registers of the device.
4776  *  Return value:
4777  *  returns 0 on success and negative on failure.
4778  */
4779
4780 static int __devinit
4781 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4782 {
4783         nic_t *sp;
4784         struct net_device *dev;
4785         int i, j, ret;
4786         int dma_flag = FALSE;
4787         u32 mac_up, mac_down;
4788         u64 val64 = 0, tmp64 = 0;
4789         XENA_dev_config_t __iomem *bar0 = NULL;
4790         u16 subid;
4791         mac_info_t *mac_control;
4792         struct config_param *config;
4793
4794 #ifdef CONFIG_S2IO_NAPI
4795         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4796 #endif
4797
4798         if ((ret = pci_enable_device(pdev))) {
4799                 DBG_PRINT(ERR_DBG,
4800                           "s2io_init_nic: pci_enable_device failed\n");
4801                 return ret;
4802         }
4803
4804         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4805                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4806                 dma_flag = TRUE;
4807                 if (pci_set_consistent_dma_mask
4808                     (pdev, DMA_64BIT_MASK)) {
4809                         DBG_PRINT(ERR_DBG,
4810                                   "Unable to obtain 64bit DMA for "
4811                                   "consistent allocations\n");
4812                         pci_disable_device(pdev);
4813                         return -ENOMEM;
4814                 }
4815         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4816                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4817         } else {
4818                 pci_disable_device(pdev);
4819                 return -ENOMEM;
4820         }
4821
4822         if (pci_request_regions(pdev, s2io_driver_name)) {
4823                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4824                 pci_disable_device(pdev);
4825                 return -ENODEV;
4826         }
4827
4828         dev = alloc_etherdev(sizeof(nic_t));
4829         if (dev == NULL) {
4830                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4831                 pci_disable_device(pdev);
4832                 pci_release_regions(pdev);
4833                 return -ENODEV;
4834         }
4835
4836         pci_set_master(pdev);
4837         pci_set_drvdata(pdev, dev);
4838         SET_MODULE_OWNER(dev);
4839         SET_NETDEV_DEV(dev, &pdev->dev);
4840
4841         /*  Private member variable initialized to s2io NIC structure */
4842         sp = dev->priv;
4843         memset(sp, 0, sizeof(nic_t));
4844         sp->dev = dev;
4845         sp->pdev = pdev;
4846         sp->high_dma_flag = dma_flag;
4847         sp->device_enabled_once = FALSE;
4848
4849         /* Initialize some PCI/PCI-X fields of the NIC. */
4850         s2io_init_pci(sp);
4851
4852         /*
4853          * Setting the device configuration parameters.
4854          * Most of these parameters can be specified by the user during
4855          * module insertion as they are module loadable parameters. If
4856          * these parameters are not specified during load time, they
4857          * are initialized with default values.
4858          */
4859         mac_control = &sp->mac_control;
4860         config = &sp->config;
4861
4862         /* Tx side parameters. */
4863         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4864         config->tx_fifo_num = tx_fifo_num;
4865         for (i = 0; i < MAX_TX_FIFOS; i++) {
4866                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4867                 config->tx_cfg[i].fifo_priority = i;
4868         }
4869
4870         /* mapping the QoS priority to the configured fifos */
4871         for (i = 0; i < MAX_TX_FIFOS; i++)
4872                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4873
4874         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4875         for (i = 0; i < config->tx_fifo_num; i++) {
4876                 config->tx_cfg[i].f_no_snoop =
4877                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4878                 if (config->tx_cfg[i].fifo_len < 65) {
4879                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4880                         break;
4881                 }
4882         }
4883         config->max_txds = MAX_SKB_FRAGS;
4884
4885         /* Rx side parameters. */
4886         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4887         config->rx_ring_num = rx_ring_num;
4888         for (i = 0; i < MAX_RX_RINGS; i++) {
4889                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4890                     (MAX_RXDS_PER_BLOCK + 1);
4891                 config->rx_cfg[i].ring_priority = i;
4892         }
4893
4894         for (i = 0; i < rx_ring_num; i++) {
4895                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4896                 config->rx_cfg[i].f_no_snoop =
4897                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4898         }
4899
4900         /*  Setting Mac Control parameters */
4901         mac_control->rmac_pause_time = rmac_pause_time;
4902         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4903         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4904
4905
4906         /* Initialize Ring buffer parameters. */
4907         for (i = 0; i < config->rx_ring_num; i++)
4908                 atomic_set(&sp->rx_bufs_left[i], 0);
4909
4910         /* Initialize the number of ISRs currently running */
4911         atomic_set(&sp->isr_cnt, 0);
4912
4913         /*  initialize the shared memory used by the NIC and the host */
4914         if (init_shared_mem(sp)) {
4915                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4916                           dev->name);
4917                 ret = -ENOMEM;
4918                 goto mem_alloc_failed;
4919         }
4920
4921         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4922                                      pci_resource_len(pdev, 0));
4923         if (!sp->bar0) {
4924                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4925                           dev->name);
4926                 ret = -ENOMEM;
4927                 goto bar0_remap_failed;
4928         }
4929
4930         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4931                                      pci_resource_len(pdev, 2));
4932         if (!sp->bar1) {
4933                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4934                           dev->name);
4935                 ret = -ENOMEM;
4936                 goto bar1_remap_failed;
4937         }
4938
4939         dev->irq = pdev->irq;
4940         dev->base_addr = (unsigned long) sp->bar0;
4941
4942         /* Initializing the BAR1 address as the start of the FIFO pointer. */
4943         for (j = 0; j < MAX_TX_FIFOS; j++) {
4944                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4945                     (sp->bar1 + (j * 0x00020000));
4946         }
4947
4948         /*  Driver entry points */
4949         dev->open = &s2io_open;
4950         dev->stop = &s2io_close;
4951         dev->hard_start_xmit = &s2io_xmit;
4952         dev->get_stats = &s2io_get_stats;
4953         dev->set_multicast_list = &s2io_set_multicast;
4954         dev->do_ioctl = &s2io_ioctl;
4955         dev->change_mtu = &s2io_change_mtu;
4956         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4957
4958         /*
4959          * will use eth_mac_addr() for dev->set_mac_address;
4960          * the MAC address will be set every time dev->open() is called.
4961          */
4962 #if defined(CONFIG_S2IO_NAPI)
4963         dev->poll = s2io_poll;
4964         dev->weight = 32;
4965 #endif
4966
4967         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4968         if (sp->high_dma_flag == TRUE)
4969                 dev->features |= NETIF_F_HIGHDMA;
4970 #ifdef NETIF_F_TSO
4971         dev->features |= NETIF_F_TSO;
4972 #endif
4973
4974         dev->tx_timeout = &s2io_tx_watchdog;
4975         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4976         INIT_WORK(&sp->rst_timer_task,
4977                   (void (*)(void *)) s2io_restart_nic, dev);
4978         INIT_WORK(&sp->set_link_task,
4979                   (void (*)(void *)) s2io_set_link, sp);
4980
4981         pci_save_state(sp->pdev);
4982
4983         /* Setting swapper control on the NIC, for proper reset operation */
4984         if (s2io_set_swapper(sp)) {
4985                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4986                           dev->name);
4987                 ret = -EAGAIN;
4988                 goto set_swap_failed;
4989         }
4990
4991         /*
4992          * Fix for all "FFs" MAC address problems observed on
4993          * Alpha platforms
4994          */
4995         fix_mac_address(sp);
4996         s2io_reset(sp);
4997
4998         /*
4999          * MAC address initialization.
5000          * For now only one mac address will be read and used.
5001          */
5002         bar0 = sp->bar0;
5003         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5004             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5005         writeq(val64, &bar0->rmac_addr_cmd_mem);
5006         wait_for_cmd_complete(sp);
5007
5008         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5009         mac_down = (u32) tmp64;
5010         mac_up = (u32) (tmp64 >> 32);
5011
5012         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5013
5014         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5015         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5016         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5017         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5018         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5019         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5020
5021         DBG_PRINT(INIT_DBG,
5022                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5023                   sp->def_mac_addr[0].mac_addr[0],
5024                   sp->def_mac_addr[0].mac_addr[1],
5025                   sp->def_mac_addr[0].mac_addr[2],
5026                   sp->def_mac_addr[0].mac_addr[3],
5027                   sp->def_mac_addr[0].mac_addr[4],
5028                   sp->def_mac_addr[0].mac_addr[5]);
5029
5030         /*  Set the factory defined MAC address initially   */
5031         dev->addr_len = ETH_ALEN;
5032         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5033
5034         /*
5035          * Initialize the tasklet status and link state flags
5036          * and the card state parameter
5037          */
5038         atomic_set(&(sp->card_state), 0);
5039         sp->tasklet_status = 0;
5040         sp->link_state = 0;
5041
5042         /* Initialize spinlocks */
5043         spin_lock_init(&sp->tx_lock);
5044 #ifndef CONFIG_S2IO_NAPI
5045         spin_lock_init(&sp->put_lock);
5046 #endif
5047         spin_lock_init(&sp->rx_lock);
5048
5049         /*
5050          * SXE-002: Configure link and activity LED to init state
5051          * on driver load.
5052          */
5053         subid = sp->pdev->subsystem_device;
5054         if ((subid & 0xFF) >= 0x07) {
5055                 val64 = readq(&bar0->gpio_control);
5056                 val64 |= 0x0000800000000000ULL;
5057                 writeq(val64, &bar0->gpio_control);
5058                 val64 = 0x0411040400000000ULL;
5059                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5060                 val64 = readq(&bar0->gpio_control);
5061         }
5062
5063         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5064
5065         if (register_netdev(dev)) {
5066                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5067                 ret = -ENODEV;
5068                 goto register_failed;
5069         }
5070
5071         /* Initialize device name */
5072         strcpy(sp->name, dev->name);
5073         strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5074
5075         /*
5076          * Make Link state as off at this point, when the Link change
5077          * interrupt comes the state will be automatically changed to
5078          * the right state.
5079          */
5080         netif_carrier_off(dev);
5081
5082         return 0;
5083
5084       register_failed:
5085       set_swap_failed:
5086         iounmap(sp->bar1);
5087       bar1_remap_failed:
5088         iounmap(sp->bar0);
5089       bar0_remap_failed:
5090       mem_alloc_failed:
5091         free_shared_mem(sp);
5092         pci_disable_device(pdev);
5093         pci_release_regions(pdev);
5094         pci_set_drvdata(pdev, NULL);
5095         free_netdev(dev);
5096
5097         return ret;
5098 }
5099
5100 /**
5101  * s2io_rem_nic - Free the PCI device
5102  * @pdev: structure containing the PCI related information of the device.
5103  * Description: This function is called by the PCI subsystem to release a
5104  * PCI device and free up all resources held by the device. This could
5105  * be in response to a Hot plug event or when the driver is to be removed
5106  * from memory.
5107  */
5108
5109 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5110 {
5111         struct net_device *dev =
5112             (struct net_device *) pci_get_drvdata(pdev);
5113         nic_t *sp;
5114
5115         if (dev == NULL) {
5116                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5117                 return;
5118         }
5119
5120         sp = dev->priv;
5121         unregister_netdev(dev);
5122
5123         free_shared_mem(sp);
5124         iounmap(sp->bar0);
5125         iounmap(sp->bar1);
5126         pci_disable_device(pdev);
5127         pci_release_regions(pdev);
5128         pci_set_drvdata(pdev, NULL);
5129         free_netdev(dev);
5130 }
5131
5132 /**
5133  * s2io_starter - Entry point for the driver
5134  * Description: This function is the entry point for the driver. It verifies
5135  * the module loadable parameters and initializes PCI configuration space.
5136  */
5137
5138 int __init s2io_starter(void)
5139 {
5140         return pci_module_init(&s2io_driver);
5141 }
5142
5143 /**
5144  * s2io_closer - Cleanup routine for the driver
5145  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5146  */
5147
5148 void s2io_closer(void)
5149 {
5150         pci_unregister_driver(&s2io_driver);
5151         DBG_PRINT(INIT_DBG, "cleanup done\n");
5152 }
5153
5154 module_init(s2io_starter);
5155 module_exit(s2io_closer);