S2io: Fixes to enable multiple transmit fifo support
[safe/jmp/linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.26.15-1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[2] = {32,48};
94 static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
/*
 * Identifies Xframe-I cards with subsystem ids 600B-600D and 640B-640D,
 * which have a faulty link state indication.  Evaluates to 1 for an
 * affected card, 0 otherwise.  Arguments and the full expansion are
 * parenthesized so the macro is safe with compound-expression arguments.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		 (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
115
/*
 * Link is up when neither the remote nor the local RMAC fault bit is set
 * in the adapter status value.  val64 is parenthesized: without it, a
 * caller passing e.g. (a | b) would bind incorrectly against '&'.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero return means it was already in use */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* rx_buffer_level() return codes */
#define PANIC	1
#define LOW	2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
134 {
135         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
136 }
137
138 /* Ethtool related variables and Macros. */
139 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
140         "Register test\t(offline)",
141         "Eeprom test\t(offline)",
142         "Link test\t(online)",
143         "RLDRAM test\t(offline)",
144         "BIST Test\t(offline)"
145 };
146
147 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
148         {"tmac_frms"},
149         {"tmac_data_octets"},
150         {"tmac_drop_frms"},
151         {"tmac_mcst_frms"},
152         {"tmac_bcst_frms"},
153         {"tmac_pause_ctrl_frms"},
154         {"tmac_ttl_octets"},
155         {"tmac_ucst_frms"},
156         {"tmac_nucst_frms"},
157         {"tmac_any_err_frms"},
158         {"tmac_ttl_less_fb_octets"},
159         {"tmac_vld_ip_octets"},
160         {"tmac_vld_ip"},
161         {"tmac_drop_ip"},
162         {"tmac_icmp"},
163         {"tmac_rst_tcp"},
164         {"tmac_tcp"},
165         {"tmac_udp"},
166         {"rmac_vld_frms"},
167         {"rmac_data_octets"},
168         {"rmac_fcs_err_frms"},
169         {"rmac_drop_frms"},
170         {"rmac_vld_mcst_frms"},
171         {"rmac_vld_bcst_frms"},
172         {"rmac_in_rng_len_err_frms"},
173         {"rmac_out_rng_len_err_frms"},
174         {"rmac_long_frms"},
175         {"rmac_pause_ctrl_frms"},
176         {"rmac_unsup_ctrl_frms"},
177         {"rmac_ttl_octets"},
178         {"rmac_accepted_ucst_frms"},
179         {"rmac_accepted_nucst_frms"},
180         {"rmac_discarded_frms"},
181         {"rmac_drop_events"},
182         {"rmac_ttl_less_fb_octets"},
183         {"rmac_ttl_frms"},
184         {"rmac_usized_frms"},
185         {"rmac_osized_frms"},
186         {"rmac_frag_frms"},
187         {"rmac_jabber_frms"},
188         {"rmac_ttl_64_frms"},
189         {"rmac_ttl_65_127_frms"},
190         {"rmac_ttl_128_255_frms"},
191         {"rmac_ttl_256_511_frms"},
192         {"rmac_ttl_512_1023_frms"},
193         {"rmac_ttl_1024_1518_frms"},
194         {"rmac_ip"},
195         {"rmac_ip_octets"},
196         {"rmac_hdr_err_ip"},
197         {"rmac_drop_ip"},
198         {"rmac_icmp"},
199         {"rmac_tcp"},
200         {"rmac_udp"},
201         {"rmac_err_drp_udp"},
202         {"rmac_xgmii_err_sym"},
203         {"rmac_frms_q0"},
204         {"rmac_frms_q1"},
205         {"rmac_frms_q2"},
206         {"rmac_frms_q3"},
207         {"rmac_frms_q4"},
208         {"rmac_frms_q5"},
209         {"rmac_frms_q6"},
210         {"rmac_frms_q7"},
211         {"rmac_full_q0"},
212         {"rmac_full_q1"},
213         {"rmac_full_q2"},
214         {"rmac_full_q3"},
215         {"rmac_full_q4"},
216         {"rmac_full_q5"},
217         {"rmac_full_q6"},
218         {"rmac_full_q7"},
219         {"rmac_pause_cnt"},
220         {"rmac_xgmii_data_err_cnt"},
221         {"rmac_xgmii_ctrl_err_cnt"},
222         {"rmac_accepted_ip"},
223         {"rmac_err_tcp"},
224         {"rd_req_cnt"},
225         {"new_rd_req_cnt"},
226         {"new_rd_req_rtry_cnt"},
227         {"rd_rtry_cnt"},
228         {"wr_rtry_rd_ack_cnt"},
229         {"wr_req_cnt"},
230         {"new_wr_req_cnt"},
231         {"new_wr_req_rtry_cnt"},
232         {"wr_rtry_cnt"},
233         {"wr_disc_cnt"},
234         {"rd_rtry_wr_ack_cnt"},
235         {"txp_wr_cnt"},
236         {"txd_rd_cnt"},
237         {"txd_wr_cnt"},
238         {"rxd_rd_cnt"},
239         {"rxd_wr_cnt"},
240         {"txf_rd_cnt"},
241         {"rxf_wr_cnt"}
242 };
243
244 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
245         {"rmac_ttl_1519_4095_frms"},
246         {"rmac_ttl_4096_8191_frms"},
247         {"rmac_ttl_8192_max_frms"},
248         {"rmac_ttl_gt_max_frms"},
249         {"rmac_osized_alt_frms"},
250         {"rmac_jabber_alt_frms"},
251         {"rmac_gt_max_alt_frms"},
252         {"rmac_vlan_frms"},
253         {"rmac_len_discard"},
254         {"rmac_fcs_discard"},
255         {"rmac_pf_discard"},
256         {"rmac_da_discard"},
257         {"rmac_red_discard"},
258         {"rmac_rts_discard"},
259         {"rmac_ingm_full_discard"},
260         {"link_fault_cnt"}
261 };
262
263 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
264         {"\n DRIVER STATISTICS"},
265         {"single_bit_ecc_errs"},
266         {"double_bit_ecc_errs"},
267         {"parity_err_cnt"},
268         {"serious_err_cnt"},
269         {"soft_reset_cnt"},
270         {"fifo_full_cnt"},
271         {"ring_0_full_cnt"},
272         {"ring_1_full_cnt"},
273         {"ring_2_full_cnt"},
274         {"ring_3_full_cnt"},
275         {"ring_4_full_cnt"},
276         {"ring_5_full_cnt"},
277         {"ring_6_full_cnt"},
278         {"ring_7_full_cnt"},
279         {"alarm_transceiver_temp_high"},
280         {"alarm_transceiver_temp_low"},
281         {"alarm_laser_bias_current_high"},
282         {"alarm_laser_bias_current_low"},
283         {"alarm_laser_output_power_high"},
284         {"alarm_laser_output_power_low"},
285         {"warn_transceiver_temp_high"},
286         {"warn_transceiver_temp_low"},
287         {"warn_laser_bias_current_high"},
288         {"warn_laser_bias_current_low"},
289         {"warn_laser_output_power_high"},
290         {"warn_laser_output_power_low"},
291         {"lro_aggregated_pkts"},
292         {"lro_flush_both_count"},
293         {"lro_out_of_sequence_pkts"},
294         {"lro_flush_due_to_max_pkts"},
295         {"lro_avg_aggr_pkts"},
296         {"mem_alloc_fail_cnt"},
297         {"pci_map_fail_cnt"},
298         {"watchdog_timer_cnt"},
299         {"mem_allocated"},
300         {"mem_freed"},
301         {"link_up_cnt"},
302         {"link_down_cnt"},
303         {"link_up_time"},
304         {"link_down_time"},
305         {"tx_tcode_buf_abort_cnt"},
306         {"tx_tcode_desc_abort_cnt"},
307         {"tx_tcode_parity_err_cnt"},
308         {"tx_tcode_link_loss_cnt"},
309         {"tx_tcode_list_proc_err_cnt"},
310         {"rx_tcode_parity_err_cnt"},
311         {"rx_tcode_abort_cnt"},
312         {"rx_tcode_parity_abort_cnt"},
313         {"rx_tcode_rda_fail_cnt"},
314         {"rx_tcode_unkn_prot_cnt"},
315         {"rx_tcode_fcs_err_cnt"},
316         {"rx_tcode_buf_size_err_cnt"},
317         {"rx_tcode_rxd_corrupt_cnt"},
318         {"rx_tcode_unkn_err_cnt"},
319         {"tda_err_cnt"},
320         {"pfc_err_cnt"},
321         {"pcc_err_cnt"},
322         {"tti_err_cnt"},
323         {"tpa_err_cnt"},
324         {"sm_err_cnt"},
325         {"lso_err_cnt"},
326         {"mac_tmac_err_cnt"},
327         {"mac_rmac_err_cnt"},
328         {"xgxs_txgxs_err_cnt"},
329         {"xgxs_rxgxs_err_cnt"},
330         {"rc_err_cnt"},
331         {"prc_pcix_err_cnt"},
332         {"rpa_err_cnt"},
333         {"rda_err_cnt"},
334         {"rti_err_cnt"},
335         {"mc_err_cnt"}
336 };
337
338 #define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
339 #define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
340 #define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)
341
342 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
343 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
344
345 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
346 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
347
348 #define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
349 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
350
/*
 * Initialize @timer with handler @handle and argument @arg, then arm it to
 * fire @exp jiffies from now.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and behaves correctly inside un-braced
 * if/else bodies (the original bare statement list did not).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
356
357 /* copy mac addr to def_mac_addr array */
358 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
359 {
360         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
361         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
362         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
363         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
364         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
365         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
366 }
367 /* Add the vlan */
368 static void s2io_vlan_rx_register(struct net_device *dev,
369                                         struct vlan_group *grp)
370 {
371         int i;
372         struct s2io_nic *nic = dev->priv;
373         unsigned long flags[MAX_TX_FIFOS];
374         struct mac_info *mac_control = &nic->mac_control;
375         struct config_param *config = &nic->config;
376
377         for (i = 0; i < config->tx_fifo_num; i++)
378                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
379
380         nic->vlgrp = grp;
381         for (i = config->tx_fifo_num - 1; i >= 0; i--)
382                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
383                                 flags[i]);
384 }
385
386 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
387 static int vlan_strip_flag;
388
389 /*
390  * Constants to be programmed into the Xena's registers, to configure
391  * the XAUI.
392  */
393
394 #define END_SIGN        0x0
395 static const u64 herc_act_dtx_cfg[] = {
396         /* Set address */
397         0x8000051536750000ULL, 0x80000515367500E0ULL,
398         /* Write data */
399         0x8000051536750004ULL, 0x80000515367500E4ULL,
400         /* Set address */
401         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
402         /* Write data */
403         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
404         /* Set address */
405         0x801205150D440000ULL, 0x801205150D4400E0ULL,
406         /* Write data */
407         0x801205150D440004ULL, 0x801205150D4400E4ULL,
408         /* Set address */
409         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
410         /* Write data */
411         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
412         /* Done */
413         END_SIGN
414 };
415
416 static const u64 xena_dtx_cfg[] = {
417         /* Set address */
418         0x8000051500000000ULL, 0x80000515000000E0ULL,
419         /* Write data */
420         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
421         /* Set address */
422         0x8001051500000000ULL, 0x80010515000000E0ULL,
423         /* Write data */
424         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
425         /* Set address */
426         0x8002051500000000ULL, 0x80020515000000E0ULL,
427         /* Write data */
428         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
429         END_SIGN
430 };
431
432 /*
433  * Constants for Fixing the MacAddress problem seen mostly on
434  * Alpha machines.
435  */
436 static const u64 fix_mac[] = {
437         0x0060000000000000ULL, 0x0060600000000000ULL,
438         0x0040600000000000ULL, 0x0000600000000000ULL,
439         0x0020600000000000ULL, 0x0060600000000000ULL,
440         0x0020600000000000ULL, 0x0060600000000000ULL,
441         0x0020600000000000ULL, 0x0060600000000000ULL,
442         0x0020600000000000ULL, 0x0060600000000000ULL,
443         0x0020600000000000ULL, 0x0060600000000000ULL,
444         0x0020600000000000ULL, 0x0060600000000000ULL,
445         0x0020600000000000ULL, 0x0060600000000000ULL,
446         0x0020600000000000ULL, 0x0060600000000000ULL,
447         0x0020600000000000ULL, 0x0060600000000000ULL,
448         0x0020600000000000ULL, 0x0060600000000000ULL,
449         0x0020600000000000ULL, 0x0000600000000000ULL,
450         0x0040600000000000ULL, 0x0060600000000000ULL,
451         END_SIGN
452 };
453
454 MODULE_LICENSE("GPL");
455 MODULE_VERSION(DRV_VERSION);
456
457
458 /* Module Loadable parameters. */
459 S2IO_PARM_INT(tx_fifo_num, 1);
460 S2IO_PARM_INT(rx_ring_num, 1);
461
462
463 S2IO_PARM_INT(rx_ring_mode, 1);
464 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
465 S2IO_PARM_INT(rmac_pause_time, 0x100);
466 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
467 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
468 S2IO_PARM_INT(shared_splits, 0);
469 S2IO_PARM_INT(tmac_util_period, 5);
470 S2IO_PARM_INT(rmac_util_period, 5);
471 S2IO_PARM_INT(l3l4hdr_size, 128);
472 /* Frequency of Rx desc syncs expressed as power of 2 */
473 S2IO_PARM_INT(rxsync_frequency, 3);
474 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
475 S2IO_PARM_INT(intr_type, 2);
476 /* Large receive offload feature */
477 static unsigned int lro_enable;
478 module_param_named(lro, lro_enable, uint, 0);
479
480 /* Max pkts to be aggregated by LRO at one time. If not specified,
481  * aggregation happens until we hit max IP pkt size(64K)
482  */
483 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
484 S2IO_PARM_INT(indicate_max_pkts, 0);
485
486 S2IO_PARM_INT(napi, 1);
487 S2IO_PARM_INT(ufo, 0);
488 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
489
490 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
491     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
492 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
493     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
494 static unsigned int rts_frm_len[MAX_RX_RINGS] =
495     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
496
497 module_param_array(tx_fifo_len, uint, NULL, 0);
498 module_param_array(rx_ring_sz, uint, NULL, 0);
499 module_param_array(rts_frm_len, uint, NULL, 0);
500
501 /*
502  * S2IO device table.
503  * This table lists all the devices that this driver supports.
504  */
505 static struct pci_device_id s2io_tbl[] __devinitdata = {
506         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
507          PCI_ANY_ID, PCI_ANY_ID},
508         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
509          PCI_ANY_ID, PCI_ANY_ID},
510         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
511          PCI_ANY_ID, PCI_ANY_ID},
512         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
513          PCI_ANY_ID, PCI_ANY_ID},
514         {0,}
515 };
516
517 MODULE_DEVICE_TABLE(pci, s2io_tbl);
518
519 static struct pci_error_handlers s2io_err_handler = {
520         .error_detected = s2io_io_error_detected,
521         .slot_reset = s2io_io_slot_reset,
522         .resume = s2io_io_resume,
523 };
524
525 static struct pci_driver s2io_driver = {
526       .name = "S2IO",
527       .id_table = s2io_tbl,
528       .probe = s2io_init_nic,
529       .remove = __devexit_p(s2io_rem_nic),
530       .err_handler = &s2io_err_handler,
531 };
532
533 /* A simplifier macro used both by init and free shared_mem Fns(). */
/*
 * Number of pages needed to hold @len items at @per_each items per page
 * (ceiling division).  Both arguments are parenthesized so compound
 * expressions (e.g. per_each passed as a sum) divide correctly; the
 * original expanded per_each unparenthesized into the divisor.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
535
536 /**
537  * init_shared_mem - Allocation and Initialization of Memory
538  * @nic: Device private variable.
539  * Description: The function allocates all the memory areas shared
540  * between the NIC and the driver. This includes Tx descriptors,
541  * Rx descriptors and the statistics block.
542  */
543
544 static int init_shared_mem(struct s2io_nic *nic)
545 {
546         u32 size;
547         void *tmp_v_addr, *tmp_v_addr_next;
548         dma_addr_t tmp_p_addr, tmp_p_addr_next;
549         struct RxD_block *pre_rxd_blk = NULL;
550         int i, j, blk_cnt;
551         int lst_size, lst_per_page;
552         struct net_device *dev = nic->dev;
553         unsigned long tmp;
554         struct buffAdd *ba;
555
556         struct mac_info *mac_control;
557         struct config_param *config;
558         unsigned long long mem_allocated = 0;
559
560         mac_control = &nic->mac_control;
561         config = &nic->config;
562
563
564         /* Allocation and initialization of TXDLs in FIOFs */
565         size = 0;
566         for (i = 0; i < config->tx_fifo_num; i++) {
567                 size += config->tx_cfg[i].fifo_len;
568         }
569         if (size > MAX_AVAILABLE_TXDS) {
570                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
571                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
572                 return -EINVAL;
573         }
574
575         size = 0;
576         for (i = 0; i < config->tx_fifo_num; i++) {
577                 size = config->tx_cfg[i].fifo_len;
578                 /*
579                  * Legal values are from 2 to 8192
580                  */
581                 if (size < 2) {
582                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
583                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
584                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
585                                 "are 2 to 8192\n");
586                         return -EINVAL;
587                 }
588         }
589
590         lst_size = (sizeof(struct TxD) * config->max_txds);
591         lst_per_page = PAGE_SIZE / lst_size;
592
593         for (i = 0; i < config->tx_fifo_num; i++) {
594                 int fifo_len = config->tx_cfg[i].fifo_len;
595                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
596                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
597                                                           GFP_KERNEL);
598                 if (!mac_control->fifos[i].list_info) {
599                         DBG_PRINT(INFO_DBG,
600                                   "Malloc failed for list_info\n");
601                         return -ENOMEM;
602                 }
603                 mem_allocated += list_holder_size;
604         }
605         for (i = 0; i < config->tx_fifo_num; i++) {
606                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
607                                                 lst_per_page);
608                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
609                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
610                     config->tx_cfg[i].fifo_len - 1;
611                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
612                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
613                     config->tx_cfg[i].fifo_len - 1;
614                 mac_control->fifos[i].fifo_no = i;
615                 mac_control->fifos[i].nic = nic;
616                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
617
618                 for (j = 0; j < page_num; j++) {
619                         int k = 0;
620                         dma_addr_t tmp_p;
621                         void *tmp_v;
622                         tmp_v = pci_alloc_consistent(nic->pdev,
623                                                      PAGE_SIZE, &tmp_p);
624                         if (!tmp_v) {
625                                 DBG_PRINT(INFO_DBG,
626                                           "pci_alloc_consistent ");
627                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
628                                 return -ENOMEM;
629                         }
630                         /* If we got a zero DMA address(can happen on
631                          * certain platforms like PPC), reallocate.
632                          * Store virtual address of page we don't want,
633                          * to be freed later.
634                          */
635                         if (!tmp_p) {
636                                 mac_control->zerodma_virt_addr = tmp_v;
637                                 DBG_PRINT(INIT_DBG,
638                                 "%s: Zero DMA address for TxDL. ", dev->name);
639                                 DBG_PRINT(INIT_DBG,
640                                 "Virtual address %p\n", tmp_v);
641                                 tmp_v = pci_alloc_consistent(nic->pdev,
642                                                      PAGE_SIZE, &tmp_p);
643                                 if (!tmp_v) {
644                                         DBG_PRINT(INFO_DBG,
645                                           "pci_alloc_consistent ");
646                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
647                                         return -ENOMEM;
648                                 }
649                                 mem_allocated += PAGE_SIZE;
650                         }
651                         while (k < lst_per_page) {
652                                 int l = (j * lst_per_page) + k;
653                                 if (l == config->tx_cfg[i].fifo_len)
654                                         break;
655                                 mac_control->fifos[i].list_info[l].list_virt_addr =
656                                     tmp_v + (k * lst_size);
657                                 mac_control->fifos[i].list_info[l].list_phy_addr =
658                                     tmp_p + (k * lst_size);
659                                 k++;
660                         }
661                 }
662         }
663
664         for (i = 0; i < config->tx_fifo_num; i++) {
665                 size = config->tx_cfg[i].fifo_len;
666                 mac_control->fifos[i].ufo_in_band_v
667                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
668                 if (!mac_control->fifos[i].ufo_in_band_v)
669                         return -ENOMEM;
670                 mem_allocated += (size * sizeof(u64));
671         }
672
673         /* Allocation and initialization of RXDs in Rings */
674         size = 0;
675         for (i = 0; i < config->rx_ring_num; i++) {
676                 if (config->rx_cfg[i].num_rxd %
677                     (rxd_count[nic->rxd_mode] + 1)) {
678                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
679                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
680                                   i);
681                         DBG_PRINT(ERR_DBG, "RxDs per Block");
682                         return FAILURE;
683                 }
684                 size += config->rx_cfg[i].num_rxd;
685                 mac_control->rings[i].block_count =
686                         config->rx_cfg[i].num_rxd /
687                         (rxd_count[nic->rxd_mode] + 1 );
688                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
689                         mac_control->rings[i].block_count;
690         }
691         if (nic->rxd_mode == RXD_MODE_1)
692                 size = (size * (sizeof(struct RxD1)));
693         else
694                 size = (size * (sizeof(struct RxD3)));
695
696         for (i = 0; i < config->rx_ring_num; i++) {
697                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
698                 mac_control->rings[i].rx_curr_get_info.offset = 0;
699                 mac_control->rings[i].rx_curr_get_info.ring_len =
700                     config->rx_cfg[i].num_rxd - 1;
701                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
702                 mac_control->rings[i].rx_curr_put_info.offset = 0;
703                 mac_control->rings[i].rx_curr_put_info.ring_len =
704                     config->rx_cfg[i].num_rxd - 1;
705                 mac_control->rings[i].nic = nic;
706                 mac_control->rings[i].ring_no = i;
707
708                 blk_cnt = config->rx_cfg[i].num_rxd /
709                                 (rxd_count[nic->rxd_mode] + 1);
710                 /*  Allocating all the Rx blocks */
711                 for (j = 0; j < blk_cnt; j++) {
712                         struct rx_block_info *rx_blocks;
713                         int l;
714
715                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
716                         size = SIZE_OF_BLOCK; //size is always page size
717                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
718                                                           &tmp_p_addr);
719                         if (tmp_v_addr == NULL) {
720                                 /*
721                                  * In case of failure, free_shared_mem()
722                                  * is called, which should free any
723                                  * memory that was alloced till the
724                                  * failure happened.
725                                  */
726                                 rx_blocks->block_virt_addr = tmp_v_addr;
727                                 return -ENOMEM;
728                         }
729                         mem_allocated += size;
730                         memset(tmp_v_addr, 0, size);
731                         rx_blocks->block_virt_addr = tmp_v_addr;
732                         rx_blocks->block_dma_addr = tmp_p_addr;
733                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
734                                                   rxd_count[nic->rxd_mode],
735                                                   GFP_KERNEL);
736                         if (!rx_blocks->rxds)
737                                 return -ENOMEM;
738                         mem_allocated +=
739                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
740                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
741                                 rx_blocks->rxds[l].virt_addr =
742                                         rx_blocks->block_virt_addr +
743                                         (rxd_size[nic->rxd_mode] * l);
744                                 rx_blocks->rxds[l].dma_addr =
745                                         rx_blocks->block_dma_addr +
746                                         (rxd_size[nic->rxd_mode] * l);
747                         }
748                 }
749                 /* Interlinking all Rx Blocks */
750                 for (j = 0; j < blk_cnt; j++) {
751                         tmp_v_addr =
752                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
753                         tmp_v_addr_next =
754                                 mac_control->rings[i].rx_blocks[(j + 1) %
755                                               blk_cnt].block_virt_addr;
756                         tmp_p_addr =
757                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
758                         tmp_p_addr_next =
759                                 mac_control->rings[i].rx_blocks[(j + 1) %
760                                               blk_cnt].block_dma_addr;
761
762                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
763                         pre_rxd_blk->reserved_2_pNext_RxD_block =
764                             (unsigned long) tmp_v_addr_next;
765                         pre_rxd_blk->pNext_RxD_Blk_physical =
766                             (u64) tmp_p_addr_next;
767                 }
768         }
769         if (nic->rxd_mode == RXD_MODE_3B) {
770                 /*
771                  * Allocation of Storages for buffer addresses in 2BUFF mode
772                  * and the buffers as well.
773                  */
774                 for (i = 0; i < config->rx_ring_num; i++) {
775                         blk_cnt = config->rx_cfg[i].num_rxd /
776                            (rxd_count[nic->rxd_mode]+ 1);
777                         mac_control->rings[i].ba =
778                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
779                                      GFP_KERNEL);
780                         if (!mac_control->rings[i].ba)
781                                 return -ENOMEM;
782                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
783                         for (j = 0; j < blk_cnt; j++) {
784                                 int k = 0;
785                                 mac_control->rings[i].ba[j] =
786                                         kmalloc((sizeof(struct buffAdd) *
787                                                 (rxd_count[nic->rxd_mode] + 1)),
788                                                 GFP_KERNEL);
789                                 if (!mac_control->rings[i].ba[j])
790                                         return -ENOMEM;
791                                 mem_allocated += (sizeof(struct buffAdd) *  \
792                                         (rxd_count[nic->rxd_mode] + 1));
793                                 while (k != rxd_count[nic->rxd_mode]) {
794                                         ba = &mac_control->rings[i].ba[j][k];
795
796                                         ba->ba_0_org = (void *) kmalloc
797                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
798                                         if (!ba->ba_0_org)
799                                                 return -ENOMEM;
800                                         mem_allocated +=
801                                                 (BUF0_LEN + ALIGN_SIZE);
802                                         tmp = (unsigned long)ba->ba_0_org;
803                                         tmp += ALIGN_SIZE;
804                                         tmp &= ~((unsigned long) ALIGN_SIZE);
805                                         ba->ba_0 = (void *) tmp;
806
807                                         ba->ba_1_org = (void *) kmalloc
808                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
809                                         if (!ba->ba_1_org)
810                                                 return -ENOMEM;
811                                         mem_allocated
812                                                 += (BUF1_LEN + ALIGN_SIZE);
813                                         tmp = (unsigned long) ba->ba_1_org;
814                                         tmp += ALIGN_SIZE;
815                                         tmp &= ~((unsigned long) ALIGN_SIZE);
816                                         ba->ba_1 = (void *) tmp;
817                                         k++;
818                                 }
819                         }
820                 }
821         }
822
823         /* Allocation and initialization of Statistics block */
824         size = sizeof(struct stat_block);
825         mac_control->stats_mem = pci_alloc_consistent
826             (nic->pdev, size, &mac_control->stats_mem_phy);
827
828         if (!mac_control->stats_mem) {
829                 /*
830                  * In case of failure, free_shared_mem() is called, which
831                  * should free any memory that was alloced till the
832                  * failure happened.
833                  */
834                 return -ENOMEM;
835         }
836         mem_allocated += size;
837         mac_control->stats_mem_sz = size;
838
839         tmp_v_addr = mac_control->stats_mem;
840         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
841         memset(tmp_v_addr, 0, size);
842         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
843                   (unsigned long long) tmp_p_addr);
844         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
845         return SUCCESS;
846 }
847
848 /**
849  * free_shared_mem - Free the allocated Memory
850  * @nic:  Device private variable.
851  * Description: This function is to free all memory locations allocated by
852  * the init_shared_mem() function and return it to the kernel.
853  */
854
855 static void free_shared_mem(struct s2io_nic *nic)
856 {
857         int i, j, blk_cnt, size;
858         void *tmp_v_addr;
859         dma_addr_t tmp_p_addr;
860         struct mac_info *mac_control;
861         struct config_param *config;
862         int lst_size, lst_per_page;
863         struct net_device *dev;
864         int page_num = 0;
865
866         if (!nic)
867                 return;
868
869         dev = nic->dev;
870
871         mac_control = &nic->mac_control;
872         config = &nic->config;
873
874         lst_size = (sizeof(struct TxD) * config->max_txds);
875         lst_per_page = PAGE_SIZE / lst_size;
876
877         for (i = 0; i < config->tx_fifo_num; i++) {
878                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
879                                                         lst_per_page);
880                 for (j = 0; j < page_num; j++) {
881                         int mem_blks = (j * lst_per_page);
882                         if (!mac_control->fifos[i].list_info)
883                                 return;
884                         if (!mac_control->fifos[i].list_info[mem_blks].
885                                  list_virt_addr)
886                                 break;
887                         pci_free_consistent(nic->pdev, PAGE_SIZE,
888                                             mac_control->fifos[i].
889                                             list_info[mem_blks].
890                                             list_virt_addr,
891                                             mac_control->fifos[i].
892                                             list_info[mem_blks].
893                                             list_phy_addr);
894                         nic->mac_control.stats_info->sw_stat.mem_freed
895                                                 += PAGE_SIZE;
896                 }
897                 /* If we got a zero DMA address during allocation,
898                  * free the page now
899                  */
900                 if (mac_control->zerodma_virt_addr) {
901                         pci_free_consistent(nic->pdev, PAGE_SIZE,
902                                             mac_control->zerodma_virt_addr,
903                                             (dma_addr_t)0);
904                         DBG_PRINT(INIT_DBG,
905                                 "%s: Freeing TxDL with zero DMA addr. ",
906                                 dev->name);
907                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
908                                 mac_control->zerodma_virt_addr);
909                         nic->mac_control.stats_info->sw_stat.mem_freed
910                                                 += PAGE_SIZE;
911                 }
912                 kfree(mac_control->fifos[i].list_info);
913                 nic->mac_control.stats_info->sw_stat.mem_freed +=
914                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
915         }
916
917         size = SIZE_OF_BLOCK;
918         for (i = 0; i < config->rx_ring_num; i++) {
919                 blk_cnt = mac_control->rings[i].block_count;
920                 for (j = 0; j < blk_cnt; j++) {
921                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
922                                 block_virt_addr;
923                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
924                                 block_dma_addr;
925                         if (tmp_v_addr == NULL)
926                                 break;
927                         pci_free_consistent(nic->pdev, size,
928                                             tmp_v_addr, tmp_p_addr);
929                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
930                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
931                         nic->mac_control.stats_info->sw_stat.mem_freed +=
932                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
933                 }
934         }
935
936         if (nic->rxd_mode == RXD_MODE_3B) {
937                 /* Freeing buffer storage addresses in 2BUFF mode. */
938                 for (i = 0; i < config->rx_ring_num; i++) {
939                         blk_cnt = config->rx_cfg[i].num_rxd /
940                             (rxd_count[nic->rxd_mode] + 1);
941                         for (j = 0; j < blk_cnt; j++) {
942                                 int k = 0;
943                                 if (!mac_control->rings[i].ba[j])
944                                         continue;
945                                 while (k != rxd_count[nic->rxd_mode]) {
946                                         struct buffAdd *ba =
947                                                 &mac_control->rings[i].ba[j][k];
948                                         kfree(ba->ba_0_org);
949                                         nic->mac_control.stats_info->sw_stat.\
950                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
951                                         kfree(ba->ba_1_org);
952                                         nic->mac_control.stats_info->sw_stat.\
953                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
954                                         k++;
955                                 }
956                                 kfree(mac_control->rings[i].ba[j]);
957                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
958                                         (sizeof(struct buffAdd) *
959                                         (rxd_count[nic->rxd_mode] + 1));
960                         }
961                         kfree(mac_control->rings[i].ba);
962                         nic->mac_control.stats_info->sw_stat.mem_freed +=
963                         (sizeof(struct buffAdd *) * blk_cnt);
964                 }
965         }
966
967         for (i = 0; i < nic->config.tx_fifo_num; i++) {
968                 if (mac_control->fifos[i].ufo_in_band_v) {
969                         nic->mac_control.stats_info->sw_stat.mem_freed
970                                 += (config->tx_cfg[i].fifo_len * sizeof(u64));
971                         kfree(mac_control->fifos[i].ufo_in_band_v);
972                 }
973         }
974
975         if (mac_control->stats_mem) {
976                 nic->mac_control.stats_info->sw_stat.mem_freed +=
977                         mac_control->stats_mem_sz;
978                 pci_free_consistent(nic->pdev,
979                                     mac_control->stats_mem_sz,
980                                     mac_control->stats_mem,
981                                     mac_control->stats_mem_phy);
982         }
983 }
984
985 /**
986  * s2io_verify_pci_mode -
987  */
988
989 static int s2io_verify_pci_mode(struct s2io_nic *nic)
990 {
991         struct XENA_dev_config __iomem *bar0 = nic->bar0;
992         register u64 val64 = 0;
993         int     mode;
994
995         val64 = readq(&bar0->pci_mode);
996         mode = (u8)GET_PCI_MODE(val64);
997
998         if ( val64 & PCI_MODE_UNKNOWN_MODE)
999                 return -1;      /* Unknown PCI mode */
1000         return mode;
1001 }
1002
1003 #define NEC_VENID   0x1033
1004 #define NEC_DEVID   0x0125
1005 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1006 {
1007         struct pci_dev *tdev = NULL;
1008         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1009                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1010                         if (tdev->bus == s2io_pdev->bus->parent)
1011                                 pci_dev_put(tdev);
1012                                 return 1;
1013                 }
1014         }
1015         return 0;
1016 }
1017
1018 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1019 /**
1020  * s2io_print_pci_mode -
1021  */
1022 static int s2io_print_pci_mode(struct s2io_nic *nic)
1023 {
1024         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1025         register u64 val64 = 0;
1026         int     mode;
1027         struct config_param *config = &nic->config;
1028
1029         val64 = readq(&bar0->pci_mode);
1030         mode = (u8)GET_PCI_MODE(val64);
1031
1032         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1033                 return -1;      /* Unknown PCI mode */
1034
1035         config->bus_speed = bus_speed[mode];
1036
1037         if (s2io_on_nec_bridge(nic->pdev)) {
1038                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1039                                                         nic->dev->name);
1040                 return mode;
1041         }
1042
1043         if (val64 & PCI_MODE_32_BITS) {
1044                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1045         } else {
1046                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1047         }
1048
1049         switch(mode) {
1050                 case PCI_MODE_PCI_33:
1051                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1052                         break;
1053                 case PCI_MODE_PCI_66:
1054                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1055                         break;
1056                 case PCI_MODE_PCIX_M1_66:
1057                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1058                         break;
1059                 case PCI_MODE_PCIX_M1_100:
1060                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1061                         break;
1062                 case PCI_MODE_PCIX_M1_133:
1063                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1064                         break;
1065                 case PCI_MODE_PCIX_M2_66:
1066                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1067                         break;
1068                 case PCI_MODE_PCIX_M2_100:
1069                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1070                         break;
1071                 case PCI_MODE_PCIX_M2_133:
1072                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1073                         break;
1074                 default:
1075                         return -1;      /* Unsupported bus speed */
1076         }
1077
1078         return mode;
1079 }
1080
1081 /**
1082  *  init_nic - Initialization of hardware
1083  *  @nic: device peivate variable
1084  *  Description: The function sequentially configures every block
1085  *  of the H/W from their reset values.
1086  *  Return Value:  SUCCESS on success and
1087  *  '-1' on failure (endian settings incorrect).
1088  */
1089
1090 static int init_nic(struct s2io_nic *nic)
1091 {
1092         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1093         struct net_device *dev = nic->dev;
1094         register u64 val64 = 0;
1095         void __iomem *add;
1096         u32 time;
1097         int i, j;
1098         struct mac_info *mac_control;
1099         struct config_param *config;
1100         int dtx_cnt = 0;
1101         unsigned long long mem_share;
1102         int mem_size;
1103
1104         mac_control = &nic->mac_control;
1105         config = &nic->config;
1106
1107         /* to set the swapper controle on the card */
1108         if(s2io_set_swapper(nic)) {
1109                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1110                 return -EIO;
1111         }
1112
1113         /*
1114          * Herc requires EOI to be removed from reset before XGXS, so..
1115          */
1116         if (nic->device_type & XFRAME_II_DEVICE) {
1117                 val64 = 0xA500000000ULL;
1118                 writeq(val64, &bar0->sw_reset);
1119                 msleep(500);
1120                 val64 = readq(&bar0->sw_reset);
1121         }
1122
1123         /* Remove XGXS from reset state */
1124         val64 = 0;
1125         writeq(val64, &bar0->sw_reset);
1126         msleep(500);
1127         val64 = readq(&bar0->sw_reset);
1128
1129         /* Ensure that it's safe to access registers by checking
1130          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1131          */
1132         if (nic->device_type == XFRAME_II_DEVICE) {
1133                 for (i = 0; i < 50; i++) {
1134                         val64 = readq(&bar0->adapter_status);
1135                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1136                                 break;
1137                         msleep(10);
1138                 }
1139                 if (i == 50)
1140                         return -ENODEV;
1141         }
1142
1143         /*  Enable Receiving broadcasts */
1144         add = &bar0->mac_cfg;
1145         val64 = readq(&bar0->mac_cfg);
1146         val64 |= MAC_RMAC_BCAST_ENABLE;
1147         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1148         writel((u32) val64, add);
1149         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1150         writel((u32) (val64 >> 32), (add + 4));
1151
1152         /* Read registers in all blocks */
1153         val64 = readq(&bar0->mac_int_mask);
1154         val64 = readq(&bar0->mc_int_mask);
1155         val64 = readq(&bar0->xgxs_int_mask);
1156
1157         /*  Set MTU */
1158         val64 = dev->mtu;
1159         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1160
1161         if (nic->device_type & XFRAME_II_DEVICE) {
1162                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1163                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1164                                           &bar0->dtx_control, UF);
1165                         if (dtx_cnt & 0x1)
1166                                 msleep(1); /* Necessary!! */
1167                         dtx_cnt++;
1168                 }
1169         } else {
1170                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1171                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1172                                           &bar0->dtx_control, UF);
1173                         val64 = readq(&bar0->dtx_control);
1174                         dtx_cnt++;
1175                 }
1176         }
1177
1178         /*  Tx DMA Initialization */
1179         val64 = 0;
1180         writeq(val64, &bar0->tx_fifo_partition_0);
1181         writeq(val64, &bar0->tx_fifo_partition_1);
1182         writeq(val64, &bar0->tx_fifo_partition_2);
1183         writeq(val64, &bar0->tx_fifo_partition_3);
1184
1185
1186         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1187                 val64 |=
1188                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1189                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1190                                     ((i * 32) + 5), 3);
1191
1192                 if (i == (config->tx_fifo_num - 1)) {
1193                         if (i % 2 == 0)
1194                                 i++;
1195                 }
1196
1197                 switch (i) {
1198                 case 1:
1199                         writeq(val64, &bar0->tx_fifo_partition_0);
1200                         val64 = 0;
1201                         break;
1202                 case 3:
1203                         writeq(val64, &bar0->tx_fifo_partition_1);
1204                         val64 = 0;
1205                         break;
1206                 case 5:
1207                         writeq(val64, &bar0->tx_fifo_partition_2);
1208                         val64 = 0;
1209                         break;
1210                 case 7:
1211                         writeq(val64, &bar0->tx_fifo_partition_3);
1212                         break;
1213                 }
1214         }
1215
1216         /*
1217          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1218          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1219          */
1220         if ((nic->device_type == XFRAME_I_DEVICE) &&
1221                 (nic->pdev->revision < 4))
1222                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1223
1224         val64 = readq(&bar0->tx_fifo_partition_0);
1225         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1226                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1227
1228         /*
1229          * Initialization of Tx_PA_CONFIG register to ignore packet
1230          * integrity checking.
1231          */
1232         val64 = readq(&bar0->tx_pa_cfg);
1233         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1234             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1235         writeq(val64, &bar0->tx_pa_cfg);
1236
1237         /* Rx DMA intialization. */
1238         val64 = 0;
1239         for (i = 0; i < config->rx_ring_num; i++) {
1240                 val64 |=
1241                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1242                          3);
1243         }
1244         writeq(val64, &bar0->rx_queue_priority);
1245
1246         /*
1247          * Allocating equal share of memory to all the
1248          * configured Rings.
1249          */
1250         val64 = 0;
1251         if (nic->device_type & XFRAME_II_DEVICE)
1252                 mem_size = 32;
1253         else
1254                 mem_size = 64;
1255
1256         for (i = 0; i < config->rx_ring_num; i++) {
1257                 switch (i) {
1258                 case 0:
1259                         mem_share = (mem_size / config->rx_ring_num +
1260                                      mem_size % config->rx_ring_num);
1261                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1262                         continue;
1263                 case 1:
1264                         mem_share = (mem_size / config->rx_ring_num);
1265                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1266                         continue;
1267                 case 2:
1268                         mem_share = (mem_size / config->rx_ring_num);
1269                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1270                         continue;
1271                 case 3:
1272                         mem_share = (mem_size / config->rx_ring_num);
1273                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1274                         continue;
1275                 case 4:
1276                         mem_share = (mem_size / config->rx_ring_num);
1277                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1278                         continue;
1279                 case 5:
1280                         mem_share = (mem_size / config->rx_ring_num);
1281                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1282                         continue;
1283                 case 6:
1284                         mem_share = (mem_size / config->rx_ring_num);
1285                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1286                         continue;
1287                 case 7:
1288                         mem_share = (mem_size / config->rx_ring_num);
1289                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1290                         continue;
1291                 }
1292         }
1293         writeq(val64, &bar0->rx_queue_cfg);
1294
1295         /*
1296          * Filling Tx round robin registers
1297          * as per the number of FIFOs
1298          */
1299         switch (config->tx_fifo_num) {
1300         case 1:
1301                 val64 = 0x0000000000000000ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_0);
1303                 writeq(val64, &bar0->tx_w_round_robin_1);
1304                 writeq(val64, &bar0->tx_w_round_robin_2);
1305                 writeq(val64, &bar0->tx_w_round_robin_3);
1306                 writeq(val64, &bar0->tx_w_round_robin_4);
1307                 break;
1308         case 2:
1309                 val64 = 0x0000010000010000ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_0);
1311                 val64 = 0x0100000100000100ULL;
1312                 writeq(val64, &bar0->tx_w_round_robin_1);
1313                 val64 = 0x0001000001000001ULL;
1314                 writeq(val64, &bar0->tx_w_round_robin_2);
1315                 val64 = 0x0000010000010000ULL;
1316                 writeq(val64, &bar0->tx_w_round_robin_3);
1317                 val64 = 0x0100000000000000ULL;
1318                 writeq(val64, &bar0->tx_w_round_robin_4);
1319                 break;
1320         case 3:
1321                 val64 = 0x0001000102000001ULL;
1322                 writeq(val64, &bar0->tx_w_round_robin_0);
1323                 val64 = 0x0001020000010001ULL;
1324                 writeq(val64, &bar0->tx_w_round_robin_1);
1325                 val64 = 0x0200000100010200ULL;
1326                 writeq(val64, &bar0->tx_w_round_robin_2);
1327                 val64 = 0x0001000102000001ULL;
1328                 writeq(val64, &bar0->tx_w_round_robin_3);
1329                 val64 = 0x0001020000000000ULL;
1330                 writeq(val64, &bar0->tx_w_round_robin_4);
1331                 break;
1332         case 4:
1333                 val64 = 0x0001020300010200ULL;
1334                 writeq(val64, &bar0->tx_w_round_robin_0);
1335                 val64 = 0x0100000102030001ULL;
1336                 writeq(val64, &bar0->tx_w_round_robin_1);
1337                 val64 = 0x0200010000010203ULL;
1338                 writeq(val64, &bar0->tx_w_round_robin_2);
1339                 val64 = 0x0001020001000001ULL;
1340                 writeq(val64, &bar0->tx_w_round_robin_3);
1341                 val64 = 0x0203000100000000ULL;
1342                 writeq(val64, &bar0->tx_w_round_robin_4);
1343                 break;
1344         case 5:
1345                 val64 = 0x0001000203000102ULL;
1346                 writeq(val64, &bar0->tx_w_round_robin_0);
1347                 val64 = 0x0001020001030004ULL;
1348                 writeq(val64, &bar0->tx_w_round_robin_1);
1349                 val64 = 0x0001000203000102ULL;
1350                 writeq(val64, &bar0->tx_w_round_robin_2);
1351                 val64 = 0x0001020001030004ULL;
1352                 writeq(val64, &bar0->tx_w_round_robin_3);
1353                 val64 = 0x0001000000000000ULL;
1354                 writeq(val64, &bar0->tx_w_round_robin_4);
1355                 break;
1356         case 6:
1357                 val64 = 0x0001020304000102ULL;
1358                 writeq(val64, &bar0->tx_w_round_robin_0);
1359                 val64 = 0x0304050001020001ULL;
1360                 writeq(val64, &bar0->tx_w_round_robin_1);
1361                 val64 = 0x0203000100000102ULL;
1362                 writeq(val64, &bar0->tx_w_round_robin_2);
1363                 val64 = 0x0304000102030405ULL;
1364                 writeq(val64, &bar0->tx_w_round_robin_3);
1365                 val64 = 0x0001000200000000ULL;
1366                 writeq(val64, &bar0->tx_w_round_robin_4);
1367                 break;
1368         case 7:
1369                 val64 = 0x0001020001020300ULL;
1370                 writeq(val64, &bar0->tx_w_round_robin_0);
1371                 val64 = 0x0102030400010203ULL;
1372                 writeq(val64, &bar0->tx_w_round_robin_1);
1373                 val64 = 0x0405060001020001ULL;
1374                 writeq(val64, &bar0->tx_w_round_robin_2);
1375                 val64 = 0x0304050000010200ULL;
1376                 writeq(val64, &bar0->tx_w_round_robin_3);
1377                 val64 = 0x0102030000000000ULL;
1378                 writeq(val64, &bar0->tx_w_round_robin_4);
1379                 break;
1380         case 8:
1381                 val64 = 0x0001020300040105ULL;
1382                 writeq(val64, &bar0->tx_w_round_robin_0);
1383                 val64 = 0x0200030106000204ULL;
1384                 writeq(val64, &bar0->tx_w_round_robin_1);
1385                 val64 = 0x0103000502010007ULL;
1386                 writeq(val64, &bar0->tx_w_round_robin_2);
1387                 val64 = 0x0304010002060500ULL;
1388                 writeq(val64, &bar0->tx_w_round_robin_3);
1389                 val64 = 0x0103020400000000ULL;
1390                 writeq(val64, &bar0->tx_w_round_robin_4);
1391                 break;
1392         }
1393
1394         /* Enable all configured Tx FIFO partitions */
1395         val64 = readq(&bar0->tx_fifo_partition_0);
1396         val64 |= (TX_FIFO_PARTITION_EN);
1397         writeq(val64, &bar0->tx_fifo_partition_0);
1398
1399         /* Filling the Rx round robin registers as per the
1400          * number of Rings and steering based on QoS.
1401          */
1402         switch (config->rx_ring_num) {
1403         case 1:
1404                 val64 = 0x8080808080808080ULL;
1405                 writeq(val64, &bar0->rts_qos_steering);
1406                 break;
1407         case 2:
1408                 val64 = 0x0000010000010000ULL;
1409                 writeq(val64, &bar0->rx_w_round_robin_0);
1410                 val64 = 0x0100000100000100ULL;
1411                 writeq(val64, &bar0->rx_w_round_robin_1);
1412                 val64 = 0x0001000001000001ULL;
1413                 writeq(val64, &bar0->rx_w_round_robin_2);
1414                 val64 = 0x0000010000010000ULL;
1415                 writeq(val64, &bar0->rx_w_round_robin_3);
1416                 val64 = 0x0100000000000000ULL;
1417                 writeq(val64, &bar0->rx_w_round_robin_4);
1418
1419                 val64 = 0x8080808040404040ULL;
1420                 writeq(val64, &bar0->rts_qos_steering);
1421                 break;
1422         case 3:
1423                 val64 = 0x0001000102000001ULL;
1424                 writeq(val64, &bar0->rx_w_round_robin_0);
1425                 val64 = 0x0001020000010001ULL;
1426                 writeq(val64, &bar0->rx_w_round_robin_1);
1427                 val64 = 0x0200000100010200ULL;
1428                 writeq(val64, &bar0->rx_w_round_robin_2);
1429                 val64 = 0x0001000102000001ULL;
1430                 writeq(val64, &bar0->rx_w_round_robin_3);
1431                 val64 = 0x0001020000000000ULL;
1432                 writeq(val64, &bar0->rx_w_round_robin_4);
1433
1434                 val64 = 0x8080804040402020ULL;
1435                 writeq(val64, &bar0->rts_qos_steering);
1436                 break;
1437         case 4:
1438                 val64 = 0x0001020300010200ULL;
1439                 writeq(val64, &bar0->rx_w_round_robin_0);
1440                 val64 = 0x0100000102030001ULL;
1441                 writeq(val64, &bar0->rx_w_round_robin_1);
1442                 val64 = 0x0200010000010203ULL;
1443                 writeq(val64, &bar0->rx_w_round_robin_2);
1444                 val64 = 0x0001020001000001ULL;
1445                 writeq(val64, &bar0->rx_w_round_robin_3);
1446                 val64 = 0x0203000100000000ULL;
1447                 writeq(val64, &bar0->rx_w_round_robin_4);
1448
1449                 val64 = 0x8080404020201010ULL;
1450                 writeq(val64, &bar0->rts_qos_steering);
1451                 break;
1452         case 5:
1453                 val64 = 0x0001000203000102ULL;
1454                 writeq(val64, &bar0->rx_w_round_robin_0);
1455                 val64 = 0x0001020001030004ULL;
1456                 writeq(val64, &bar0->rx_w_round_robin_1);
1457                 val64 = 0x0001000203000102ULL;
1458                 writeq(val64, &bar0->rx_w_round_robin_2);
1459                 val64 = 0x0001020001030004ULL;
1460                 writeq(val64, &bar0->rx_w_round_robin_3);
1461                 val64 = 0x0001000000000000ULL;
1462                 writeq(val64, &bar0->rx_w_round_robin_4);
1463
1464                 val64 = 0x8080404020201008ULL;
1465                 writeq(val64, &bar0->rts_qos_steering);
1466                 break;
1467         case 6:
1468                 val64 = 0x0001020304000102ULL;
1469                 writeq(val64, &bar0->rx_w_round_robin_0);
1470                 val64 = 0x0304050001020001ULL;
1471                 writeq(val64, &bar0->rx_w_round_robin_1);
1472                 val64 = 0x0203000100000102ULL;
1473                 writeq(val64, &bar0->rx_w_round_robin_2);
1474                 val64 = 0x0304000102030405ULL;
1475                 writeq(val64, &bar0->rx_w_round_robin_3);
1476                 val64 = 0x0001000200000000ULL;
1477                 writeq(val64, &bar0->rx_w_round_robin_4);
1478
1479                 val64 = 0x8080404020100804ULL;
1480                 writeq(val64, &bar0->rts_qos_steering);
1481                 break;
1482         case 7:
1483                 val64 = 0x0001020001020300ULL;
1484                 writeq(val64, &bar0->rx_w_round_robin_0);
1485                 val64 = 0x0102030400010203ULL;
1486                 writeq(val64, &bar0->rx_w_round_robin_1);
1487                 val64 = 0x0405060001020001ULL;
1488                 writeq(val64, &bar0->rx_w_round_robin_2);
1489                 val64 = 0x0304050000010200ULL;
1490                 writeq(val64, &bar0->rx_w_round_robin_3);
1491                 val64 = 0x0102030000000000ULL;
1492                 writeq(val64, &bar0->rx_w_round_robin_4);
1493
1494                 val64 = 0x8080402010080402ULL;
1495                 writeq(val64, &bar0->rts_qos_steering);
1496                 break;
1497         case 8:
1498                 val64 = 0x0001020300040105ULL;
1499                 writeq(val64, &bar0->rx_w_round_robin_0);
1500                 val64 = 0x0200030106000204ULL;
1501                 writeq(val64, &bar0->rx_w_round_robin_1);
1502                 val64 = 0x0103000502010007ULL;
1503                 writeq(val64, &bar0->rx_w_round_robin_2);
1504                 val64 = 0x0304010002060500ULL;
1505                 writeq(val64, &bar0->rx_w_round_robin_3);
1506                 val64 = 0x0103020400000000ULL;
1507                 writeq(val64, &bar0->rx_w_round_robin_4);
1508
1509                 val64 = 0x8040201008040201ULL;
1510                 writeq(val64, &bar0->rts_qos_steering);
1511                 break;
1512         }
1513
1514         /* UDP Fix */
1515         val64 = 0;
1516         for (i = 0; i < 8; i++)
1517                 writeq(val64, &bar0->rts_frm_len_n[i]);
1518
1519         /* Set the default rts frame length for the rings configured */
1520         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1521         for (i = 0 ; i < config->rx_ring_num ; i++)
1522                 writeq(val64, &bar0->rts_frm_len_n[i]);
1523
1524         /* Set the frame length for the configured rings
1525          * desired by the user
1526          */
1527         for (i = 0; i < config->rx_ring_num; i++) {
1528                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1529                  * specified frame length steering.
1530                  * If the user provides the frame length then program
1531                  * the rts_frm_len register for those values or else
1532                  * leave it as it is.
1533                  */
1534                 if (rts_frm_len[i] != 0) {
1535                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1536                                 &bar0->rts_frm_len_n[i]);
1537                 }
1538         }
1539
1540         /* Disable differentiated services steering logic */
1541         for (i = 0; i < 64; i++) {
1542                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1543                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1544                                 dev->name);
1545                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1546                         return -ENODEV;
1547                 }
1548         }
1549
1550         /* Program statistics memory */
1551         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1552
1553         if (nic->device_type == XFRAME_II_DEVICE) {
1554                 val64 = STAT_BC(0x320);
1555                 writeq(val64, &bar0->stat_byte_cnt);
1556         }
1557
1558         /*
1559          * Initializing the sampling rate for the device to calculate the
1560          * bandwidth utilization.
1561          */
1562         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1563             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1564         writeq(val64, &bar0->mac_link_util);
1565
1566
1567         /*
1568          * Initializing the Transmit and Receive Traffic Interrupt
1569          * Scheme.
1570          */
1571         /*
1572          * TTI Initialization. Default Tx timer gets us about
1573          * 250 interrupts per sec. Continuous interrupts are enabled
1574          * by default.
1575          */
1576         if (nic->device_type == XFRAME_II_DEVICE) {
1577                 int count = (nic->config.bus_speed * 125)/2;
1578                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1579         } else {
1580
1581                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1582         }
1583         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1584             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1585             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1586                 if (use_continuous_tx_intrs)
1587                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1588         writeq(val64, &bar0->tti_data1_mem);
1589
1590         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1591             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1592             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1593         writeq(val64, &bar0->tti_data2_mem);
1594
1595         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1596         writeq(val64, &bar0->tti_command_mem);
1597
1598         /*
1599          * Once the operation completes, the Strobe bit of the command
1600          * register will be reset. We poll for this particular condition
1601          * We wait for a maximum of 500ms for the operation to complete,
1602          * if it's not complete by then we return error.
1603          */
1604         time = 0;
1605         while (TRUE) {
1606                 val64 = readq(&bar0->tti_command_mem);
1607                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1608                         break;
1609                 }
1610                 if (time > 10) {
1611                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1612                                   dev->name);
1613                         return -ENODEV;
1614                 }
1615                 msleep(50);
1616                 time++;
1617         }
1618
1619         /* RTI Initialization */
1620         if (nic->device_type == XFRAME_II_DEVICE) {
1621                 /*
1622                  * Programmed to generate Apprx 500 Intrs per
1623                  * second
1624                  */
1625                 int count = (nic->config.bus_speed * 125)/4;
1626                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1627         } else
1628                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1629         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1630                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1631                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1632
1633         writeq(val64, &bar0->rti_data1_mem);
1634
1635         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1636                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1637         if (nic->config.intr_type == MSI_X)
1638             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1639                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1640         else
1641             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1642                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1643         writeq(val64, &bar0->rti_data2_mem);
1644
1645         for (i = 0; i < config->rx_ring_num; i++) {
1646                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1647                                 | RTI_CMD_MEM_OFFSET(i);
1648                 writeq(val64, &bar0->rti_command_mem);
1649
1650                 /*
1651                  * Once the operation completes, the Strobe bit of the
1652                  * command register will be reset. We poll for this
1653                  * particular condition. We wait for a maximum of 500ms
1654                  * for the operation to complete, if it's not complete
1655                  * by then we return error.
1656                  */
1657                 time = 0;
1658                 while (TRUE) {
1659                         val64 = readq(&bar0->rti_command_mem);
1660                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1661                                 break;
1662
1663                         if (time > 10) {
1664                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1665                                           dev->name);
1666                                 return -ENODEV;
1667                         }
1668                         time++;
1669                         msleep(50);
1670                 }
1671         }
1672
1673         /*
1674          * Initializing proper values as Pause threshold into all
1675          * the 8 Queues on Rx side.
1676          */
1677         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1678         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1679
1680         /* Disable RMAC PAD STRIPPING */
1681         add = &bar0->mac_cfg;
1682         val64 = readq(&bar0->mac_cfg);
1683         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1684         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1685         writel((u32) (val64), add);
1686         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1687         writel((u32) (val64 >> 32), (add + 4));
1688         val64 = readq(&bar0->mac_cfg);
1689
1690         /* Enable FCS stripping by adapter */
1691         add = &bar0->mac_cfg;
1692         val64 = readq(&bar0->mac_cfg);
1693         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1694         if (nic->device_type == XFRAME_II_DEVICE)
1695                 writeq(val64, &bar0->mac_cfg);
1696         else {
1697                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1698                 writel((u32) (val64), add);
1699                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1700                 writel((u32) (val64 >> 32), (add + 4));
1701         }
1702
1703         /*
1704          * Set the time value to be inserted in the pause frame
1705          * generated by xena.
1706          */
1707         val64 = readq(&bar0->rmac_pause_cfg);
1708         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1709         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1710         writeq(val64, &bar0->rmac_pause_cfg);
1711
1712         /*
1713          * Set the Threshold Limit for Generating the pause frame
1714          * If the amount of data in any Queue exceeds ratio of
1715          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1716          * pause frame is generated
1717          */
1718         val64 = 0;
1719         for (i = 0; i < 4; i++) {
1720                 val64 |=
1721                     (((u64) 0xFF00 | nic->mac_control.
1722                       mc_pause_threshold_q0q3)
1723                      << (i * 2 * 8));
1724         }
1725         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1726
1727         val64 = 0;
1728         for (i = 0; i < 4; i++) {
1729                 val64 |=
1730                     (((u64) 0xFF00 | nic->mac_control.
1731                       mc_pause_threshold_q4q7)
1732                      << (i * 2 * 8));
1733         }
1734         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1735
1736         /*
1737          * TxDMA will stop Read request if the number of read split has
1738          * exceeded the limit pointed by shared_splits
1739          */
1740         val64 = readq(&bar0->pic_control);
1741         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1742         writeq(val64, &bar0->pic_control);
1743
1744         if (nic->config.bus_speed == 266) {
1745                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1746                 writeq(0x0, &bar0->read_retry_delay);
1747                 writeq(0x0, &bar0->write_retry_delay);
1748         }
1749
1750         /*
1751          * Programming the Herc to split every write transaction
1752          * that does not start on an ADB to reduce disconnects.
1753          */
1754         if (nic->device_type == XFRAME_II_DEVICE) {
1755                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1756                         MISC_LINK_STABILITY_PRD(3);
1757                 writeq(val64, &bar0->misc_control);
1758                 val64 = readq(&bar0->pic_control2);
1759                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1760                 writeq(val64, &bar0->pic_control2);
1761         }
1762         if (strstr(nic->product_name, "CX4")) {
1763                 val64 = TMAC_AVG_IPG(0x17);
1764                 writeq(val64, &bar0->tmac_avg_ipg);
1765         }
1766
1767         return SUCCESS;
1768 }
1769 #define LINK_UP_DOWN_INTERRUPT          1
1770 #define MAC_RMAC_ERR_TIMER              2
1771
1772 static int s2io_link_fault_indication(struct s2io_nic *nic)
1773 {
1774         if (nic->config.intr_type != INTA)
1775                 return MAC_RMAC_ERR_TIMER;
1776         if (nic->device_type == XFRAME_II_DEVICE)
1777                 return LINK_UP_DOWN_INTERRUPT;
1778         else
1779                 return MAC_RMAC_ERR_TIMER;
1780 }
1781
1782 /**
1783  *  do_s2io_write_bits -  update alarm bits in alarm register
1784  *  @value: alarm bits
1785  *  @flag: interrupt status
1786  *  @addr: address value
1787  *  Description: update alarm bits in alarm register
1788  *  Return Value:
1789  *  NONE.
1790  */
1791 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1792 {
1793         u64 temp64;
1794
1795         temp64 = readq(addr);
1796
1797         if(flag == ENABLE_INTRS)
1798                 temp64 &= ~((u64) value);
1799         else
1800                 temp64 |= ((u64) value);
1801         writeq(temp64, addr);
1802 }
1803
/*
 * en_dis_err_alarms - enable/disable the per-block error alarm sources.
 * @nic: device private structure.
 * @mask: bitmap of top-level blocks to touch (TX_DMA_INTR, TX_MAC_INTR,
 *        TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR).
 * @flag: ENABLE_INTRS or DISABLE_INTRS, passed through to
 *        do_s2io_write_bits() for each alarm mask register.
 *
 * For every block selected in @mask this accumulates the block's bit into
 * a general interrupt mask and programs the block's individual error mask
 * registers. The accumulated mask is stored in nic->general_int_mask but
 * is then forced to 0 (see the note at the bottom) while alarm interrupts
 * remain disabled in this driver.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		/* Sub-block summary bits within the TxDMA interrupt */
		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		/* PFC (packet framing controller) error sources */
		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		/* TDA (transmit descriptor arbiter) error sources */
		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		/* PCC (per-channel controller) error sources */
		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		/* TTI (transmit traffic interrupt) error sources */
		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		/* LSO (large send offload) engine error sources */
		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		/* TPA (transmit protocol assist) error sources */
		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		/* TMAC buffer overrun, state machine and ECC errors */
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		/* Tx XGXS (10G serdes interface) error sources */
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		/* Sub-block summary bits within the RxDMA interrupt */
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		/* RC (receive controller) error sources */
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		/* PRC PCI-X abort / data parity error sources */
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		/* RPA (receive protocol assist) error sources */
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		/* RDA (receive descriptor arbiter) error sources */
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		/* RTI (receive traffic interrupt) error sources */
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		/* RMAC buffer overrun, ECC and link state change sources */
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		/* Rx XGXS (10G serdes interface) error sources */
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		/* Memory controller ECC and PLL lock error sources */
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1926 /**
1927  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1928  *  @nic: device private variable,
1929  *  @mask: A mask indicating which Intr block must be modified and,
1930  *  @flag: A flag indicating whether to enable or disable the Intrs.
1931  *  Description: This function will either disable or enable the interrupts
1932  *  depending on the flag argument. The mask argument can be used to
1933  *  enable/disable any Intr block.
1934  *  Return Value: NONE.
1935  */
1936
1937 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1938 {
1939         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1940         register u64 temp64 = 0, intr_mask = 0;
1941
1942         intr_mask = nic->general_int_mask;
1943
1944         /*  Top level interrupt classification */
1945         /*  PIC Interrupts */
1946         if (mask & TX_PIC_INTR) {
1947                 /*  Enable PIC Intrs in the general intr mask register */
1948                 intr_mask |= TXPIC_INT_M;
1949                 if (flag == ENABLE_INTRS) {
1950                         /*
1951                          * If Hercules adapter enable GPIO otherwise
1952                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1953                          * interrupts for now.
1954                          * TODO
1955                          */
1956                         if (s2io_link_fault_indication(nic) ==
1957                                         LINK_UP_DOWN_INTERRUPT ) {
1958                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1959                                                 &bar0->pic_int_mask);
1960                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1961                                                 &bar0->gpio_int_mask);
1962                         } else
1963                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1964                 } else if (flag == DISABLE_INTRS) {
1965                         /*
1966                          * Disable PIC Intrs in the general
1967                          * intr mask register
1968                          */
1969                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1970                 }
1971         }
1972
1973         /*  Tx traffic interrupts */
1974         if (mask & TX_TRAFFIC_INTR) {
1975                 intr_mask |= TXTRAFFIC_INT_M;
1976                 if (flag == ENABLE_INTRS) {
1977                         /*
1978                          * Enable all the Tx side interrupts
1979                          * writing 0 Enables all 64 TX interrupt levels
1980                          */
1981                         writeq(0x0, &bar0->tx_traffic_mask);
1982                 } else if (flag == DISABLE_INTRS) {
1983                         /*
1984                          * Disable Tx Traffic Intrs in the general intr mask
1985                          * register.
1986                          */
1987                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1988                 }
1989         }
1990
1991         /*  Rx traffic interrupts */
1992         if (mask & RX_TRAFFIC_INTR) {
1993                 intr_mask |= RXTRAFFIC_INT_M;
1994                 if (flag == ENABLE_INTRS) {
1995                         /* writing 0 Enables all 8 RX interrupt levels */
1996                         writeq(0x0, &bar0->rx_traffic_mask);
1997                 } else if (flag == DISABLE_INTRS) {
1998                         /*
1999                          * Disable Rx Traffic Intrs in the general intr mask
2000                          * register.
2001                          */
2002                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2003                 }
2004         }
2005
2006         temp64 = readq(&bar0->general_int_mask);
2007         if (flag == ENABLE_INTRS)
2008                 temp64 &= ~((u64) intr_mask);
2009         else
2010                 temp64 = DISABLE_ALL_INTRS;
2011         writeq(temp64, &bar0->general_int_mask);
2012
2013         nic->general_int_mask = readq(&bar0->general_int_mask);
2014 }
2015
2016 /**
2017  *  verify_pcc_quiescent- Checks for PCC quiescent state
2018  *  Return: 1 If PCC is quiescence
2019  *          0 If PCC is not quiescence
2020  */
2021 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2022 {
2023         int ret = 0, herc;
2024         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2025         u64 val64 = readq(&bar0->adapter_status);
2026
2027         herc = (sp->device_type == XFRAME_II_DEVICE);
2028
2029         if (flag == FALSE) {
2030                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2031                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2032                                 ret = 1;
2033                 } else {
2034                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2035                                 ret = 1;
2036                 }
2037         } else {
2038                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2039                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2040                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2041                                 ret = 1;
2042                 } else {
2043                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2044                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2045                                 ret = 1;
2046                 }
2047         }
2048
2049         return ret;
2050 }
2051 /**
2052  *  verify_xena_quiescence - Checks whether the H/W is ready
2053  *  Description: Returns whether the H/W is ready to go or not. Depending
2054  *  on whether adapter enable bit was written or not the comparison
2055  *  differs and the calling function passes the input argument flag to
2056  *  indicate this.
2057  *  Return: 1 If xena is quiescence
2058  *          0 If Xena is not quiescence
2059  */
2060
2061 static int verify_xena_quiescence(struct s2io_nic *sp)
2062 {
2063         int  mode;
2064         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2065         u64 val64 = readq(&bar0->adapter_status);
2066         mode = s2io_verify_pci_mode(sp);
2067
2068         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2069                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2070                 return 0;
2071         }
2072         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2073         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2074                 return 0;
2075         }
2076         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2077                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2078                 return 0;
2079         }
2080         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2081                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2082                 return 0;
2083         }
2084         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2085                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2086                 return 0;
2087         }
2088         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2089                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2090                 return 0;
2091         }
2092         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2093                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2094                 return 0;
2095         }
2096         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2097                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2098                 return 0;
2099         }
2100
2101         /*
2102          * In PCI 33 mode, the P_PLL is not used, and therefore,
2103          * the the P_PLL_LOCK bit in the adapter_status register will
2104          * not be asserted.
2105          */
2106         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2107                 sp->device_type == XFRAME_II_DEVICE && mode !=
2108                 PCI_MODE_PCI_33) {
2109                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2110                 return 0;
2111         }
2112         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2113                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2114                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2115                 return 0;
2116         }
2117         return 1;
2118 }
2119
2120 /**
2121  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2122  * @sp: Pointer to device specifc structure
2123  * Description :
2124  * New procedure to clear mac address reading  problems on Alpha platforms
2125  *
2126  */
2127
2128 static void fix_mac_address(struct s2io_nic * sp)
2129 {
2130         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2131         u64 val64;
2132         int i = 0;
2133
2134         while (fix_mac[i] != END_SIGN) {
2135                 writeq(fix_mac[i++], &bar0->gpio_control);
2136                 udelay(10);
2137                 val64 = readq(&bar0->gpio_control);
2138         }
2139 }
2140
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE when the adapter fails the quiescence
 *  check.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first Rx block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us not to strip VLAN tags in hw. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): this clears ADAPTER_ECC_EN although the original
	 * comment here said "Enabling ECC Protection" - confirm the bit
	 * polarity against the Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value written at BAR0 + 0x2700. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2259 /**
2260  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2261  */
2262 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2263                                         TxD *txdlp, int get_off)
2264 {
2265         struct s2io_nic *nic = fifo_data->nic;
2266         struct sk_buff *skb;
2267         struct TxD *txds;
2268         u16 j, frg_cnt;
2269
2270         txds = txdlp;
2271         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2272                 pci_unmap_single(nic->pdev, (dma_addr_t)
2273                         txds->Buffer_Pointer, sizeof(u64),
2274                         PCI_DMA_TODEVICE);
2275                 txds++;
2276         }
2277
2278         skb = (struct sk_buff *) ((unsigned long)
2279                         txds->Host_Control);
2280         if (!skb) {
2281                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2282                 return NULL;
2283         }
2284         pci_unmap_single(nic->pdev, (dma_addr_t)
2285                          txds->Buffer_Pointer,
2286                          skb->len - skb->data_len,
2287                          PCI_DMA_TODEVICE);
2288         frg_cnt = skb_shinfo(skb)->nr_frags;
2289         if (frg_cnt) {
2290                 txds++;
2291                 for (j = 0; j < frg_cnt; j++, txds++) {
2292                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2293                         if (!txds->Buffer_Pointer)
2294                                 break;
2295                         pci_unmap_page(nic->pdev, (dma_addr_t)
2296                                         txds->Buffer_Pointer,
2297                                        frag->size, PCI_DMA_TODEVICE);
2298                 }
2299         }
2300         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2301         return(skb);
2302 }
2303
2304 /**
2305  *  free_tx_buffers - Free all queued Tx buffers
2306  *  @nic : device private variable.
2307  *  Description:
2308  *  Free all queued Tx buffers.
2309  *  Return Value: void
2310 */
2311
2312 static void free_tx_buffers(struct s2io_nic *nic)
2313 {
2314         struct net_device *dev = nic->dev;
2315         struct sk_buff *skb;
2316         struct TxD *txdp;
2317         int i, j;
2318         struct mac_info *mac_control;
2319         struct config_param *config;
2320         int cnt = 0;
2321
2322         mac_control = &nic->mac_control;
2323         config = &nic->config;
2324
2325         for (i = 0; i < config->tx_fifo_num; i++) {
2326                 unsigned long flags;
2327                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2328                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2329                         txdp = (struct TxD *) \
2330                         mac_control->fifos[i].list_info[j].list_virt_addr;
2331                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2332                         if (skb) {
2333                                 nic->mac_control.stats_info->sw_stat.mem_freed
2334                                         += skb->truesize;
2335                                 dev_kfree_skb(skb);
2336                                 cnt++;
2337                         }
2338                 }
2339                 DBG_PRINT(INTR_DBG,
2340                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2341                           dev->name, cnt, i);
2342                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2343                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2344                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2345         }
2346 }
2347
2348 /**
2349  *   stop_nic -  To stop the nic
2350  *   @nic ; device private variable.
2351  *   Description:
2352  *   This function does exactly the opposite of what the start_nic()
2353  *   function does. This function is called to stop the device.
2354  *   Return Value:
2355  *   void.
2356  */
2357
2358 static void stop_nic(struct s2io_nic *nic)
2359 {
2360         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2361         register u64 val64 = 0;
2362         u16 interruptible;
2363         struct mac_info *mac_control;
2364         struct config_param *config;
2365
2366         mac_control = &nic->mac_control;
2367         config = &nic->config;
2368
2369         /*  Disable all interrupts */
2370         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2371         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2372         interruptible |= TX_PIC_INTR;
2373         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2374
2375         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2376         val64 = readq(&bar0->adapter_control);
2377         val64 &= ~(ADAPTER_CNTL_EN);
2378         writeq(val64, &bar0->adapter_control);
2379 }
2380
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of descriptors the ring can still absorb. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put caught up with get while this RxD still holds an skb:
		 * the ring is effectively full, stop refilling. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Walked past the last RxD of a block: wrap to next block. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* put_pos is read by rx_intr_handler(); in non-NAPI mode the
		 * two paths race, so guard the update with put_lock. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the NIC: nothing more to post. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing out, so already-filled buffers are used. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data up to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Map Buffer0 once, then only sync it on reuse. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				/* On Buffer1 failure, undo the Buffer2
				 * mapping before bailing out. */
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Hand ownership to the NIC immediately except on every
		 * (2^rxsync_frequency)-th descriptor, whose OWN bit is
		 * deferred (see below) to batch the wmb() barriers. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2625
2626 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2627 {
2628         struct net_device *dev = sp->dev;
2629         int j;
2630         struct sk_buff *skb;
2631         struct RxD_t *rxdp;
2632         struct mac_info *mac_control;
2633         struct buffAdd *ba;
2634         struct RxD1 *rxdp1;
2635         struct RxD3 *rxdp3;
2636
2637         mac_control = &sp->mac_control;
2638         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2639                 rxdp = mac_control->rings[ring_no].
2640                                 rx_blocks[blk].rxds[j].virt_addr;
2641                 skb = (struct sk_buff *)
2642                         ((unsigned long) rxdp->Host_Control);
2643                 if (!skb) {
2644                         continue;
2645                 }
2646                 if (sp->rxd_mode == RXD_MODE_1) {
2647                         rxdp1 = (struct RxD1*)rxdp;
2648                         pci_unmap_single(sp->pdev, (dma_addr_t)
2649                                 rxdp1->Buffer0_ptr,
2650                                 dev->mtu +
2651                                 HEADER_ETHERNET_II_802_3_SIZE
2652                                 + HEADER_802_2_SIZE +
2653                                 HEADER_SNAP_SIZE,
2654                                 PCI_DMA_FROMDEVICE);
2655                         memset(rxdp, 0, sizeof(struct RxD1));
2656                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2657                         rxdp3 = (struct RxD3*)rxdp;
2658                         ba = &mac_control->rings[ring_no].
2659                                 ba[blk][j];
2660                         pci_unmap_single(sp->pdev, (dma_addr_t)
2661                                 rxdp3->Buffer0_ptr,
2662                                 BUF0_LEN,
2663                                 PCI_DMA_FROMDEVICE);
2664                         pci_unmap_single(sp->pdev, (dma_addr_t)
2665                                 rxdp3->Buffer1_ptr,
2666                                 BUF1_LEN,
2667                                 PCI_DMA_FROMDEVICE);
2668                         pci_unmap_single(sp->pdev, (dma_addr_t)
2669                                 rxdp3->Buffer2_ptr,
2670                                 dev->mtu + 4,
2671                                 PCI_DMA_FROMDEVICE);
2672                         memset(rxdp, 0, sizeof(struct RxD3));
2673                 }
2674                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2675                 dev_kfree_skb(skb);
2676                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2677         }
2678 }
2679
2680 /**
2681  *  free_rx_buffers - Frees all Rx buffers
2682  *  @sp: device private variable.
2683  *  Description:
2684  *  This function will free all Rx buffers allocated by host.
2685  *  Return Value:
2686  *  NONE.
2687  */
2688
2689 static void free_rx_buffers(struct s2io_nic *sp)
2690 {
2691         struct net_device *dev = sp->dev;
2692         int i, blk = 0, buf_cnt = 0;
2693         struct mac_info *mac_control;
2694         struct config_param *config;
2695
2696         mac_control = &sp->mac_control;
2697         config = &sp->config;
2698
2699         for (i = 0; i < config->rx_ring_num; i++) {
2700                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2701                         free_rxd_blk(sp,i,blk);
2702
2703                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2704                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2705                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2706                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2707                 atomic_set(&sp->rx_bufs_left[i], 0);
2708                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2709                           dev->name, buf_cnt, i);
2710         }
2711 }
2712
2713 /**
2714  * s2io_poll - Rx interrupt handler for NAPI support
2715  * @napi : pointer to the napi structure.
2716  * @budget : The number of packets that were budgeted to be processed
2717  * during  one pass through the 'Poll" function.
2718  * Description:
2719  * Comes into picture only if NAPI support has been incorporated. It does
2720  * the same thing that rx_intr_handler does, but not in a interrupt context
2721  * also It will process only a given number of packets.
2722  * Return value:
2723  * 0 on success and 1 if there are No Rx packets to be processed.
2724  */
2725
2726 static int s2io_poll(struct napi_struct *napi, int budget)
2727 {
2728         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2729         struct net_device *dev = nic->dev;
2730         int pkt_cnt = 0, org_pkts_to_process;
2731         struct mac_info *mac_control;
2732         struct config_param *config;
2733         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2734         int i;
2735
2736         mac_control = &nic->mac_control;
2737         config = &nic->config;
2738
2739         nic->pkts_to_process = budget;
2740         org_pkts_to_process = nic->pkts_to_process;
2741
2742         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2743         readl(&bar0->rx_traffic_int);
2744
2745         for (i = 0; i < config->rx_ring_num; i++) {
2746                 rx_intr_handler(&mac_control->rings[i]);
2747                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2748                 if (!nic->pkts_to_process) {
2749                         /* Quota for the current iteration has been met */
2750                         goto no_rx;
2751                 }
2752         }
2753
2754         netif_rx_complete(dev, napi);
2755
2756         for (i = 0; i < config->rx_ring_num; i++) {
2757                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2758                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2759                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2760                         break;
2761                 }
2762         }
2763         /* Re enable the Rx interrupts. */
2764         writeq(0x0, &bar0->rx_traffic_mask);
2765         readl(&bar0->rx_traffic_mask);
2766         return pkt_cnt;
2767
2768 no_rx:
2769         for (i = 0; i < config->rx_ring_num; i++) {
2770                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2771                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2772                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2773                         break;
2774                 }
2775         }
2776         return pkt_cnt;
2777 }
2778
2779 #ifdef CONFIG_NET_POLL_CONTROLLER
2780 /**
2781  * s2io_netpoll - netpoll event handler entry point
2782  * @dev : pointer to the device structure.
2783  * Description:
2784  *      This function will be called by upper layer to check for events on the
2785  * interface in situations where interrupts are disabled. It is used for
2786  * specific in-kernel networking tasks, such as remote consoles and kernel
2787  * debugging over the network (example netdump in RedHat).
2788  */
2789 static void s2io_netpoll(struct net_device *dev)
2790 {
2791         struct s2io_nic *nic = dev->priv;
2792         struct mac_info *mac_control;
2793         struct config_param *config;
2794         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2795         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2796         int i;
2797
2798         if (pci_channel_offline(nic->pdev))
2799                 return;
2800
2801         disable_irq(dev->irq);
2802
2803         mac_control = &nic->mac_control;
2804         config = &nic->config;
2805
2806         writeq(val64, &bar0->rx_traffic_int);
2807         writeq(val64, &bar0->tx_traffic_int);
2808
2809         /* we need to free up the transmitted skbufs or else netpoll will
2810          * run out of skbs and will fail and eventually netpoll application such
2811          * as netdump will fail.
2812          */
2813         for (i = 0; i < config->tx_fifo_num; i++)
2814                 tx_intr_handler(&mac_control->fifos[i]);
2815
2816         /* check for received packet and indicate up to network */
2817         for (i = 0; i < config->rx_ring_num; i++)
2818                 rx_intr_handler(&mac_control->rings[i]);
2819
2820         for (i = 0; i < config->rx_ring_num; i++) {
2821                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2822                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2823                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2824                         break;
2825                 }
2826         }
2827         enable_irq(dev->irq);
2828         return;
2829 }
2830 #endif
2831
2832 /**
2833  *  rx_intr_handler - Rx interrupt handler
2834  *  @ring_data: pointer to the ring's control structure.
2835  *  Description:
2836  *  If the interrupt is because of a received frame or if the
2837  *  receive ring contains fresh as yet un-processed frames,this function is
2838  *  called. It picks out the RxD at which place the last Rx processing had
2839  *  stopped and sends the skb to the OSM's Rx handler and then increments
2840  *  the offset.
2841  *  Return Value:
2842  *  NONE.
2843  */
2844 static void rx_intr_handler(struct ring_info *ring_data)
2845 {
2846         struct s2io_nic *nic = ring_data->nic;
2847         struct net_device *dev = (struct net_device *) nic->dev;
2848         int get_block, put_block, put_offset;
2849         struct rx_curr_get_info get_info, put_info;
2850         struct RxD_t *rxdp;
2851         struct sk_buff *skb;
2852         int pkt_cnt = 0;
2853         int i;
2854         struct RxD1* rxdp1;
2855         struct RxD3* rxdp3;
2856
2857         spin_lock(&nic->rx_lock);
2858
2859         get_info = ring_data->rx_curr_get_info;
2860         get_block = get_info.block_index;
2861         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2862         put_block = put_info.block_index;
2863         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2864         if (!napi) {
2865                 spin_lock(&nic->put_lock);
2866                 put_offset = ring_data->put_pos;
2867                 spin_unlock(&nic->put_lock);
2868         } else
2869                 put_offset = ring_data->put_pos;
2870
2871         while (RXD_IS_UP2DT(rxdp)) {
2872                 /*
2873                  * If your are next to put index then it's
2874                  * FIFO full condition
2875                  */
2876                 if ((get_block == put_block) &&
2877                     (get_info.offset + 1) == put_info.offset) {
2878                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2879                         break;
2880                 }
2881                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2882                 if (skb == NULL) {
2883                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2884                                   dev->name);
2885                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2886                         spin_unlock(&nic->rx_lock);
2887                         return;
2888                 }
2889                 if (nic->rxd_mode == RXD_MODE_1) {
2890                         rxdp1 = (struct RxD1*)rxdp;
2891                         pci_unmap_single(nic->pdev, (dma_addr_t)
2892                                 rxdp1->Buffer0_ptr,
2893                                 dev->mtu +
2894                                 HEADER_ETHERNET_II_802_3_SIZE +
2895                                 HEADER_802_2_SIZE +
2896                                 HEADER_SNAP_SIZE,
2897                                 PCI_DMA_FROMDEVICE);
2898                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2899                         rxdp3 = (struct RxD3*)rxdp;
2900                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2901                                 rxdp3->Buffer0_ptr,
2902                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2903                         pci_unmap_single(nic->pdev, (dma_addr_t)
2904                                 rxdp3->Buffer2_ptr,
2905                                 dev->mtu + 4,
2906                                 PCI_DMA_FROMDEVICE);
2907                 }
2908                 prefetch(skb->data);
2909                 rx_osm_handler(ring_data, rxdp);
2910                 get_info.offset++;
2911                 ring_data->rx_curr_get_info.offset = get_info.offset;
2912                 rxdp = ring_data->rx_blocks[get_block].
2913                                 rxds[get_info.offset].virt_addr;
2914                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2915                         get_info.offset = 0;
2916                         ring_data->rx_curr_get_info.offset = get_info.offset;
2917                         get_block++;
2918                         if (get_block == ring_data->block_count)
2919                                 get_block = 0;
2920                         ring_data->rx_curr_get_info.block_index = get_block;
2921                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2922                 }
2923
2924                 nic->pkts_to_process -= 1;
2925                 if ((napi) && (!nic->pkts_to_process))
2926                         break;
2927                 pkt_cnt++;
2928                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2929                         break;
2930         }
2931         if (nic->lro) {
2932                 /* Clear all LRO sessions before exiting */
2933                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2934                         struct lro *lro = &nic->lro0_n[i];
2935                         if (lro->in_use) {
2936                                 update_L3L4_header(nic, lro);
2937                                 queue_rx_frame(lro->parent);
2938                                 clear_lro_session(lro);
2939                         }
2940                 }
2941         }
2942
2943         spin_unlock(&nic->rx_lock);
2944 }
2945
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : per-FIFO private data; carries a back-pointer to the
 *  owning s2io_nic structure.
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb;
        struct TxD *txdlp;
        unsigned long flags = 0;
        u8 err_mask;

        /*
         * Another context may already be reaping this FIFO; trylock and
         * bail out rather than spin in interrupt context.
         */
        if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
                        return;

        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /*
         * Reap completed descriptors: stop when we hit one the NIC still
         * owns, catch up with the producer, or find an empty slot.
         */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get pointer, wrapping past fifo_len. */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were freed, so the queue may accept packets again. */
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);

        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3045
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
        u64 val64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Address transaction: latch the register address in the MMD. */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction: write the value to the latched address. */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_MDIO_DATA(value)
                        | MDIO_OP(MDIO_OP_WRITE_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* A read transaction follows the write; its result is not consumed here. */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
        | MDIO_MMD_DEV_ADDR(mmd_type)
        | MDIO_MMS_PRT_ADDR(0x0)
        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

}
3094
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  NONE
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
        u64 val64 = 0x0;
        u64 rval64 = 0x0;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* address transaction */
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* Data transaction */
        val64 = 0x0;
        val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
                        | MDIO_MMD_DEV_ADDR(mmd_type)
                        | MDIO_MMS_PRT_ADDR(0x0)
                        | MDIO_OP(MDIO_OP_READ_TRANS);
        writeq(val64, &bar0->mdio_control);
        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
        writeq(val64, &bar0->mdio_control);
        udelay(100);

        /* The 16-bit read data sits in bits 16..31 of mdio_control. */
        rval64 = readq(&bar0->mdio_control);
        rval64 = rval64 & 0xFFFF0000;
        rval64 = rval64 >> 16;
        return rval64;
}
3137 /**
3138  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3139  *  @counter      : couter value to be updated
3140  *  @flag         : flag to indicate the status
3141  *  @type         : counter type
3142  *  Description:
3143  *  This function is to check the status of the xpak counters value
3144  *  NONE
3145  */
3146
3147 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3148 {
3149         u64 mask = 0x3;
3150         u64 val64;
3151         int i;
3152         for(i = 0; i <index; i++)
3153                 mask = mask << 0x2;
3154
3155         if(flag > 0)
3156         {
3157                 *counter = *counter + 1;
3158                 val64 = *regs_stat & mask;
3159                 val64 = val64 >> (index * 0x2);
3160                 val64 = val64 + 1;
3161                 if(val64 == 3)
3162                 {
3163                         switch(type)
3164                         {
3165                         case 1:
3166                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3167                                           "service. Excessive temperatures may "
3168                                           "result in premature transceiver "
3169                                           "failure \n");
3170                         break;
3171                         case 2:
3172                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3173                                           "service Excessive bias currents may "
3174                                           "indicate imminent laser diode "
3175                                           "failure \n");
3176                         break;
3177                         case 3:
3178                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3179                                           "service Excessive laser output "
3180                                           "power may saturate far-end "
3181                                           "receiver\n");
3182                         break;
3183                         default:
3184                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3185                                           "type \n");
3186                         }
3187                         val64 = 0x0;
3188                 }
3189                 val64 = val64 << (index * 0x2);
3190                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3191
3192         } else {
3193                 *regs_stat = *regs_stat & (~mask);
3194         }
3195 }
3196
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Bit 7: transceiver temperature high alarm (persistence-tracked). */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        /* Bit 3: laser bias current high alarm (persistence-tracked). */
        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        /* Bit 1: laser output power high alarm (persistence-tracked). */
        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3295
3296 /**
3297  *  wait_for_cmd_complete - waits for a command to complete.
3298  *  @sp : private member of the device structure, which is a pointer to the
3299  *  s2io_nic structure.
3300  *  Description: Function that waits for a command to Write into RMAC
3301  *  ADDR DATA registers to be completed and returns either success or
3302  *  error depending on whether the command was complete or not.
3303  *  Return value:
3304  *   SUCCESS on success and FAILURE on failure.
3305  */
3306
3307 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3308                                 int bit_state)
3309 {
3310         int ret = FAILURE, cnt = 0, delay = 1;
3311         u64 val64;
3312
3313         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3314                 return FAILURE;
3315
3316         do {
3317                 val64 = readq(addr);
3318                 if (bit_state == S2IO_BIT_RESET) {
3319                         if (!(val64 & busy_bit)) {
3320                                 ret = SUCCESS;
3321                                 break;
3322                         }
3323                 } else {
3324                         if (!(val64 & busy_bit)) {
3325                                 ret = SUCCESS;
3326                                 break;
3327                         }
3328                 }
3329
3330                 if(in_interrupt())
3331                         mdelay(delay);
3332                 else
3333                         msleep(delay);
3334
3335                 if (++cnt >= 10)
3336                         delay = 50;
3337         } while (cnt < 20);
3338         return ret;
3339 }
3340 /*
3341  * check_pci_device_id - Checks if the device id is supported
3342  * @id : device id
3343  * Description: Function to check if the pci device id is supported by driver.
3344  * Return value: Actual device id if supported else PCI_ANY_ID
3345  */
3346 static u16 check_pci_device_id(u16 id)
3347 {
3348         switch (id) {
3349         case PCI_DEVICE_ID_HERC_WIN:
3350         case PCI_DEVICE_ID_HERC_UNI:
3351                 return XFRAME_II_DEVICE;
3352         case PCI_DEVICE_ID_S2IO_UNI:
3353         case PCI_DEVICE_ID_S2IO_WIN:
3354                 return XFRAME_I_DEVICE;
3355         default:
3356                 return PCI_ANY_ID;
3357         }
3358 }
3359
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the global software reset. */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        /* CX4 adapters get a longer post-reset delay. */
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Re-read the device id until config space becomes accessible again. */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* restore mac_addr entries */
        do_s2io_restore_unicast_mc(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(s2BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        /* Preserve the sw_stat fields that must survive the stats wipe. */
        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occured on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        sp->device_enabled_once = FALSE;
}
3476
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Try each candidate swapper setting until feedback matches. */
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Verify write access by bouncing a pattern off xmsi_address. */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                /* Probe the write-path swapper settings the same way. */
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3614
3615 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3616 {
3617         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3618         u64 val64;
3619         int ret = 0, cnt = 0;
3620
3621         do {
3622                 val64 = readq(&bar0->xmsi_access);
3623                 if (!(val64 & s2BIT(15)))
3624                         break;
3625                 mdelay(1);
3626                 cnt++;
3627         } while(cnt < 5);
3628         if (cnt == 5) {
3629                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3630                 ret = 1;
3631         }
3632
3633         return ret;
3634 }
3635
3636 static void restore_xmsi_data(struct s2io_nic *nic)
3637 {
3638         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3639         u64 val64;
3640         int i;
3641
3642         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3643                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3644                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3645                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3646                 writeq(val64, &bar0->xmsi_access);
3647                 if (wait_for_msix_trans(nic, i)) {
3648                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3649                         continue;
3650                 }
3651         }
3652 }
3653
3654 static void store_xmsi_data(struct s2io_nic *nic)
3655 {
3656         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3657         u64 val64, addr, data;
3658         int i;
3659
3660         /* Store and display */
3661         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3662                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3663                 writeq(val64, &bar0->xmsi_access);
3664                 if (wait_for_msix_trans(nic, i)) {
3665                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3666                         continue;
3667                 }
3668                 addr = readq(&bar0->xmsi_address);
3669                 data = readq(&bar0->xmsi_data);
3670                 if (addr && data) {
3671                         nic->msix_info[i].addr = addr;
3672                         nic->msix_info[i].data = data;
3673                 }
3674         }
3675 }
3676
/*
 * s2io_enable_msi_x - allocate MSI-X bookkeeping and enable MSI-X mode.
 *
 * Allocates the msix_entry array handed to pci_enable_msix() and a
 * parallel s2io_msix_entry array holding the per-vector cookie and type.
 * Vector 0 is left unassigned here (reserved; the tx/rx loops start at
 * msix_indx = 1); one vector is steered to each Tx FIFO via tx_mat and
 * one to each Rx ring via rx_mat.
 *
 * Returns 0 on success, -ENOMEM on allocation or enable failure.  On
 * failure both arrays are freed and NULLed.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	/* Vector table passed to the PCI core */
	nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	/* Driver-side per-vector state (arg cookie, type, in_use flag) */
	nic->s2io_entries =
		kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
				   GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	 nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer one vector per Tx FIFO, starting at vector 1 */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	/* Then one vector per Rx ring, continuing from the same index */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[msix_indx].arg
			= &nic->mac_control.rings[j];
		nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(rx_mat, &bar0->rx_mat);

	nic->avail_msix_vectors = 0;
	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get less vectors than min required */
	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
		nic->avail_msix_vectors = ret;
		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
	}
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		nic->avail_msix_vectors = 0;
		return -ENOMEM;
	}
	if (!nic->avail_msix_vectors)
		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3769
3770 /* Handle software interrupt used during MSI(X) test */
3771 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3772 {
3773         struct s2io_nic *sp = dev_id;
3774
3775         sp->msi_detected = 1;
3776         wake_up(&sp->msi_wait);
3777
3778         return IRQ_HANDLED;
3779 }
3780
/* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify the MSI-X interrupt path actually delivers.
 * Hooks a test handler on vector 1, arms the NIC's scheduled-interrupt
 * timer as a one-shot routed to MSI 1, and waits up to HZ/10 for the
 * handler to fire.  Returns 0 on success, -EOPNOTSUPP if no interrupt
 * arrived, or the request_irq() error.
 */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Save the control register so it can be restored afterwards */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Undo the one-shot timer programming */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3823
3824 static void remove_msix_isr(struct s2io_nic *sp)
3825 {
3826         int i;
3827         u16 msi_control;
3828
3829         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3830                 if (sp->s2io_entries[i].in_use ==
3831                         MSIX_REGISTERED_SUCCESS) {
3832                         int vector = sp->entries[i].vector;
3833                         void *arg = sp->s2io_entries[i].arg;
3834                         free_irq(vector, arg);
3835                 }
3836         }
3837
3838         kfree(sp->entries);
3839         kfree(sp->s2io_entries);
3840         sp->entries = NULL;
3841         sp->s2io_entries = NULL;
3842
3843         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3844         msi_control &= 0xFFFE; /* Disable MSI */
3845         pci_write_config_word(sp->pdev, 0x42, msi_control);
3846
3847         pci_disable_msix(sp->pdev);
3848 }
3849
3850 static void remove_inta_isr(struct s2io_nic *sp)
3851 {
3852         struct net_device *dev = sp->dev;
3853
3854         free_irq(sp->pdev->irq, dev);
3855 }
3856
3857 /* ********************************************************* *
3858  * Functions defined below concern the OS part of the driver *
3859  * ********************************************************* */
3860
3861 /**
3862  *  s2io_open - open entry point of the driver
3863  *  @dev : pointer to the device structure.
3864  *  Description:
3865  *  This function is the open entry point of the driver. It mainly calls a
3866  *  function to allocate Rx buffers and inserts them into the buffer
3867  *  descriptors and then enables the Rx part of the NIC.
3868  *  Return value:
3869  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3870  *   file on failure.
3871  */
3872
3873 static int s2io_open(struct net_device *dev)
3874 {
3875         struct s2io_nic *sp = dev->priv;
3876         int err = 0;
3877
3878         /*
3879          * Make sure you have link off by default every time
3880          * Nic is initialized
3881          */
3882         netif_carrier_off(dev);
3883         sp->last_link_state = 0;
3884
3885         if (sp->config.intr_type == MSI_X) {
3886                 int ret = s2io_enable_msi_x(sp);
3887
3888                 if (!ret) {
3889                         ret = s2io_test_msi(sp);
3890                         /* rollback MSI-X, will re-enable during add_isr() */
3891                         remove_msix_isr(sp);
3892                 }
3893                 if (ret) {
3894
3895                         DBG_PRINT(ERR_DBG,
3896                           "%s: MSI-X requested but failed to enable\n",
3897                           dev->name);
3898                         sp->config.intr_type = INTA;
3899                 }
3900         }
3901
3902         /* NAPI doesn't work well with MSI(X) */
3903          if (sp->config.intr_type != INTA) {
3904                 if(sp->config.napi)
3905                         sp->config.napi = 0;
3906         }
3907
3908         /* Initialize H/W and enable interrupts */
3909         err = s2io_card_up(sp);
3910         if (err) {
3911                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3912                           dev->name);
3913                 goto hw_init_failed;
3914         }
3915
3916         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3917                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3918                 s2io_card_down(sp);
3919                 err = -ENODEV;
3920                 goto hw_init_failed;
3921         }
3922
3923         netif_start_queue(dev);
3924         return 0;
3925
3926 hw_init_failed:
3927         if (sp->config.intr_type == MSI_X) {
3928                 if (sp->entries) {
3929                         kfree(sp->entries);
3930                         sp->mac_control.stats_info->sw_stat.mem_freed
3931                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3932                 }
3933                 if (sp->s2io_entries) {
3934                         kfree(sp->s2io_entries);
3935                         sp->mac_control.stats_info->sw_stat.mem_freed
3936                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3937                 }
3938         }
3939         return err;
3940 }
3941
3942 /**
3943  *  s2io_close -close entry point of the driver
3944  *  @dev : device pointer.
3945  *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
3949  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3950  *  Return value:
3951  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3952  *  file on failure.
3953  */
3954
3955 static int s2io_close(struct net_device *dev)
3956 {
3957         struct s2io_nic *sp = dev->priv;
3958         struct config_param *config = &sp->config;
3959         u64 tmp64;
3960         int offset;
3961
3962         /* Return if the device is already closed               *
3963         *  Can happen when s2io_card_up failed in change_mtu    *
3964         */
3965         if (!is_s2io_card_up(sp))
3966                 return 0;
3967
3968         netif_stop_queue(dev);
3969
3970         /* delete all populated mac entries */
3971         for (offset = 1; offset < config->max_mc_addr; offset++) {
3972                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3973                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3974                         do_s2io_delete_unicast_mc(sp, tmp64);
3975         }
3976
3977         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3978         s2io_card_down(sp);
3979
3980         return 0;
3981 }
3982
3983 /**
3984  *  s2io_xmit - Tx entry point of te driver
3985  *  @skb : the socket buffer containing the Tx data.
3986  *  @dev : device pointer.
3987  *  Description :
3988  *  This function is the Tx entry point of the driver. S2IO NIC supports
3989  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3990  *  NOTE: when device cant queue the pkt,just the trans_start variable will
3991  *  not be upadted.
3992  *  Return value:
3993  *  0 on success & 1 on failure.
3994  */
3995
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length skbs up front */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Silently drop packets while the card is being reset */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* VLAN priority is the top 3 bits of the tag */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	/* tx_lock protects the put pointer and descriptor list of this
	 * FIFO; held for the whole descriptor-fill + doorbell sequence. */
	fifo = &mac_control->fifos[queue];
	spin_lock_irqsave(&fifo->tx_lock, flags);
	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	/* Program offload bits (LSO / checksum) into the first TxD */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * holding the IPv6 fragment id; MSS is rounded down to
		 * a multiple of 8. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		fifo->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb into the (next) descriptor */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		/* NOTE(review): frag mapping errors are not checked here,
		 * unlike the pci_map_single calls above. */
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: point the FIFO at the descriptor list and
	 * hand the list to the hardware */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes above before the lock release below */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the list */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4170
4171 static void
4172 s2io_alarm_handle(unsigned long data)
4173 {
4174         struct s2io_nic *sp = (struct s2io_nic *)data;
4175         struct net_device *dev = sp->dev;
4176
4177         s2io_handle_errors(dev);
4178         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4179 }
4180
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for ring rng_n after Rx
 * interrupt processing.  In non-LRO mode, refill synchronously when the
 * buffer level hits PANIC (unless the tasklet already owns the refill),
 * or defer to the tasklet at LOW level.  In LRO mode, always attempt a
 * refill.  Returns 0 normally, -1 on out-of-memory at PANIC level.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		/* NOTE(review): TASKLET_IN_USE presumably also claims the
		 * tasklet bit, since bit 0 of tasklet_status is cleared on
		 * both exits below — confirm against its definition. */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			/* Not urgent: let the tasklet refill */
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4209
4210 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4211 {
4212         struct ring_info *ring = (struct ring_info *)dev_id;
4213         struct s2io_nic *sp = ring->nic;
4214
4215         if (!is_s2io_card_up(sp))
4216                 return IRQ_HANDLED;
4217
4218         rx_intr_handler(ring);
4219         s2io_chk_rx_buffers(sp, ring->ring_no);
4220
4221         return IRQ_HANDLED;
4222 }
4223
4224 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4225 {
4226         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4227         struct s2io_nic *sp = fifo->nic;
4228
4229         if (!is_s2io_card_up(sp))
4230                 return IRQ_HANDLED;
4231
4232         tx_intr_handler(fifo);
4233         return IRQ_HANDLED;
4234 }
/*
 * s2io_txpic_intr_handle - service GPIO (link up/down) interrupts from
 * the PIC block: report link transitions via s2io_link(), re-arm the
 * opposite-direction interrupt mask, and drive the adapter enable/LED
 * bits accordingly.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			/* Second write also turns the adapter LED on */
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): result discarded — presumably a read-back to flush
	 * the posted mask writes above; confirm before removing. */
	val64 = readq(&bar0->gpio_int_mask);
}
4295
4296 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4298  *  @value: alarm bits
4299  *  @addr: address value
4300  *  @cnt: counter variable
4301  *  Description: Check for alarm and increment the counter
4302  *  Return Value:
4303  *  1 - if alarm bit set
4304  *  0 - if alarm bit is not set
4305  */
4306 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4307                           unsigned long long *cnt)
4308 {
4309         u64 val64;
4310         val64 = readq(addr);
4311         if ( val64 & value ) {
4312                 writeq(val64, addr);
4313                 (*cnt)++;
4314                 return 1;
4315         }
4316         return 0;
4317
4318 }
4319
4320 /**
4321  *  s2io_handle_errors - Xframe error indication handler
4322  *  @nic: device private variable
4323  *  Description: Handle alarms such as loss of link, single or
4324  *  double ECC errors, critical and serious errors.
4325  *  Return Value:
4326  *  NONE
4327  */
4328 static void s2io_handle_errors(void * dev_id)
4329 {
4330         struct net_device *dev = (struct net_device *) dev_id;
4331         struct s2io_nic *sp = dev->priv;
4332         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4333         u64 temp64 = 0,val64=0;
4334         int i = 0;
4335
4336         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4337         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4338
4339         if (!is_s2io_card_up(sp))
4340                 return;
4341
4342         if (pci_channel_offline(sp->pdev))
4343                 return;
4344
4345         memset(&sw_stat->ring_full_cnt, 0,
4346                 sizeof(sw_stat->ring_full_cnt));
4347
4348         /* Handling the XPAK counters update */
4349         if(stats->xpak_timer_count < 72000) {
4350                 /* waiting for an hour */
4351                 stats->xpak_timer_count++;
4352         } else {
4353                 s2io_updt_xpak_counter(dev);
4354                 /* reset the count to zero */
4355                 stats->xpak_timer_count = 0;
4356         }
4357
4358         /* Handling link status change error Intr */
4359         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4360                 val64 = readq(&bar0->mac_rmac_err_reg);
4361                 writeq(val64, &bar0->mac_rmac_err_reg);
4362                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4363                         schedule_work(&sp->set_link_task);
4364         }
4365
4366         /* In case of a serious error, the device will be Reset. */
4367         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4368                                 &sw_stat->serious_err_cnt))
4369                 goto reset;
4370
4371         /* Check for data parity error */
4372         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4373                                 &sw_stat->parity_err_cnt))
4374                 goto reset;
4375
4376         /* Check for ring full counter */
4377         if (sp->device_type == XFRAME_II_DEVICE) {
4378                 val64 = readq(&bar0->ring_bump_counter1);
4379                 for (i=0; i<4; i++) {
4380                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4381                         temp64 >>= 64 - ((i+1)*16);
4382                         sw_stat->ring_full_cnt[i] += temp64;
4383                 }
4384
4385                 val64 = readq(&bar0->ring_bump_counter2);
4386                 for (i=0; i<4; i++) {
4387                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4388                         temp64 >>= 64 - ((i+1)*16);
4389                          sw_stat->ring_full_cnt[i+4] += temp64;
4390                 }
4391         }
4392
4393         val64 = readq(&bar0->txdma_int_status);
4394         /*check for pfc_err*/
4395         if (val64 & TXDMA_PFC_INT) {
4396                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4397                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4398                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4399                                 &sw_stat->pfc_err_cnt))
4400                         goto reset;
4401                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4402                                 &sw_stat->pfc_err_cnt);
4403         }
4404
4405         /*check for tda_err*/
4406         if (val64 & TXDMA_TDA_INT) {
4407                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4408                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4409                                 &sw_stat->tda_err_cnt))
4410                         goto reset;
4411                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4412                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4413         }
4414         /*check for pcc_err*/
4415         if (val64 & TXDMA_PCC_INT) {
4416                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4417                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4418                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4419                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4420                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4421                                 &sw_stat->pcc_err_cnt))
4422                         goto reset;
4423                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4424                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4425         }
4426
4427         /*check for tti_err*/
4428         if (val64 & TXDMA_TTI_INT) {
4429                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4430                                 &sw_stat->tti_err_cnt))
4431                         goto reset;
4432                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4433                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4434         }
4435
4436         /*check for lso_err*/
4437         if (val64 & TXDMA_LSO_INT) {
4438                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4439                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4440                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4441                         goto reset;
4442                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4443                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4444         }
4445
4446         /*check for tpa_err*/
4447         if (val64 & TXDMA_TPA_INT) {
4448                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4449                         &sw_stat->tpa_err_cnt))
4450                         goto reset;
4451                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4452                         &sw_stat->tpa_err_cnt);
4453         }
4454
4455         /*check for sm_err*/
4456         if (val64 & TXDMA_SM_INT) {
4457                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4458                         &sw_stat->sm_err_cnt))
4459                         goto reset;
4460         }
4461
4462         val64 = readq(&bar0->mac_int_status);
4463         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4464                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4465                                 &bar0->mac_tmac_err_reg,
4466                                 &sw_stat->mac_tmac_err_cnt))
4467                         goto reset;
4468                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4469                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4470                                 &bar0->mac_tmac_err_reg,
4471                                 &sw_stat->mac_tmac_err_cnt);
4472         }
4473
4474         val64 = readq(&bar0->xgxs_int_status);
4475         if (val64 & XGXS_INT_STATUS_TXGXS) {
4476                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4477                                 &bar0->xgxs_txgxs_err_reg,
4478                                 &sw_stat->xgxs_txgxs_err_cnt))
4479                         goto reset;
4480                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4481                                 &bar0->xgxs_txgxs_err_reg,
4482                                 &sw_stat->xgxs_txgxs_err_cnt);
4483         }
4484
4485         val64 = readq(&bar0->rxdma_int_status);
4486         if (val64 & RXDMA_INT_RC_INT_M) {
4487                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4488                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4489                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4490                         goto reset;
4491                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4492                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4493                                 &sw_stat->rc_err_cnt);
4494                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4495                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4496                                 &sw_stat->prc_pcix_err_cnt))
4497                         goto reset;
4498                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4499                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4500                                 &sw_stat->prc_pcix_err_cnt);
4501         }
4502
4503         if (val64 & RXDMA_INT_RPA_INT_M) {
4504                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4505                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4506                         goto reset;
4507                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4508                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4509         }
4510
4511         if (val64 & RXDMA_INT_RDA_INT_M) {
4512                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4513                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4514                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4515                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4516                         goto reset;
4517                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4518                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4519                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4520         }
4521
4522         if (val64 & RXDMA_INT_RTI_INT_M) {
4523                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4524                                 &sw_stat->rti_err_cnt))
4525                         goto reset;
4526                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4527                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4528         }
4529
4530         val64 = readq(&bar0->mac_int_status);
4531         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4532                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4533                                 &bar0->mac_rmac_err_reg,
4534                                 &sw_stat->mac_rmac_err_cnt))
4535                         goto reset;
4536                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4537                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4538                                 &sw_stat->mac_rmac_err_cnt);
4539         }
4540
4541         val64 = readq(&bar0->xgxs_int_status);
4542         if (val64 & XGXS_INT_STATUS_RXGXS) {
4543                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4544                                 &bar0->xgxs_rxgxs_err_reg,
4545                                 &sw_stat->xgxs_rxgxs_err_cnt))
4546                         goto reset;
4547         }
4548
4549         val64 = readq(&bar0->mc_int_status);
4550         if(val64 & MC_INT_STATUS_MC_INT) {
4551                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4552                                 &sw_stat->mc_err_cnt))
4553                         goto reset;
4554
4555                 /* Handling Ecc errors */
4556                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4557                         writeq(val64, &bar0->mc_err_reg);
4558                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4559                                 sw_stat->double_ecc_errs++;
4560                                 if (sp->device_type != XFRAME_II_DEVICE) {
4561                                         /*
4562                                          * Reset XframeI only if critical error
4563                                          */
4564                                         if (val64 &
4565                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4566                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4567                                                                 goto reset;
4568                                         }
4569                         } else
4570                                 sw_stat->single_ecc_errs++;
4571                 }
4572         }
4573         return;
4574
4575 reset:
4576         netif_stop_queue(dev);
4577         schedule_work(&sp->rst_timer_task);
4578         sw_stat->soft_reset_cnt++;
4579         return;
4580 }
4581
4582 /**
4583  *  s2io_isr - ISR handler of the device .
4584  *  @irq: the irq of the device.
4585  *  @dev_id: a void pointer to the dev structure of the NIC.
4586  *  Description:  This function is the ISR handler of the device. It
4587  *  identifies the reason for the interrupt and calls the relevant
4588  *  service routines. As a contongency measure, this ISR allocates the
4589  *  recv buffers, if their numbers are below the panic value which is
4590  *  presently set to 25% of the original number of rcv buffers allocated.
4591  *  Return value:
4592  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4593  *   IRQ_NONE: will be returned if interrupt is not from our device
4594  */
4595 static irqreturn_t s2io_isr(int irq, void *dev_id)
4596 {
4597         struct net_device *dev = (struct net_device *) dev_id;
4598         struct s2io_nic *sp = dev->priv;
4599         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4600         int i;
4601         u64 reason = 0;
4602         struct mac_info *mac_control;
4603         struct config_param *config;
4604
4605         /* Pretend we handled any irq's from a disconnected card */
4606         if (pci_channel_offline(sp->pdev))
4607                 return IRQ_NONE;
4608
4609         if (!is_s2io_card_up(sp))
4610                 return IRQ_NONE;
4611
4612         mac_control = &sp->mac_control;
4613         config = &sp->config;
4614
4615         /*
4616          * Identify the cause for interrupt and call the appropriate
4617          * interrupt handler. Causes for the interrupt could be;
4618          * 1. Rx of packet.
4619          * 2. Tx complete.
4620          * 3. Link down.
4621          */
4622         reason = readq(&bar0->general_int_status);
4623
4624         if (unlikely(reason == S2IO_MINUS_ONE) ) {
4625                 /* Nothing much can be done. Get out */
4626                 return IRQ_HANDLED;
4627         }
4628
4629         if (reason & (GEN_INTR_RXTRAFFIC |
4630                 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4631         {
4632                 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4633
4634                 if (config->napi) {
4635                         if (reason & GEN_INTR_RXTRAFFIC) {
4636                                 if (likely(netif_rx_schedule_prep(dev,
4637                                                         &sp->napi))) {
4638                                         __netif_rx_schedule(dev, &sp->napi);
4639                                         writeq(S2IO_MINUS_ONE,
4640                                                &bar0->rx_traffic_mask);
4641                                 } else
4642                                         writeq(S2IO_MINUS_ONE,
4643                                                &bar0->rx_traffic_int);
4644                         }
4645                 } else {
4646                         /*
4647                          * rx_traffic_int reg is an R1 register, writing all 1's
4648                          * will ensure that the actual interrupt causing bit
4649                          * get's cleared and hence a read can be avoided.
4650                          */
4651                         if (reason & GEN_INTR_RXTRAFFIC)
4652                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4653
4654                         for (i = 0; i < config->rx_ring_num; i++)
4655                                 rx_intr_handler(&mac_control->rings[i]);
4656                 }
4657
4658                 /*
4659                  * tx_traffic_int reg is an R1 register, writing all 1's
4660                  * will ensure that the actual interrupt causing bit get's
4661                  * cleared and hence a read can be avoided.
4662                  */
4663                 if (reason & GEN_INTR_TXTRAFFIC)
4664                         writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4665
4666                 for (i = 0; i < config->tx_fifo_num; i++)
4667                         tx_intr_handler(&mac_control->fifos[i]);
4668
4669                 if (reason & GEN_INTR_TXPIC)
4670                         s2io_txpic_intr_handle(sp);
4671
4672                 /*
4673                  * Reallocate the buffers from the interrupt handler itself.
4674                  */
4675                 if (!config->napi) {
4676                         for (i = 0; i < config->rx_ring_num; i++)
4677                                 s2io_chk_rx_buffers(sp, i);
4678                 }
4679                 writeq(sp->general_int_mask, &bar0->general_int_mask);
4680                 readl(&bar0->general_int_status);
4681
4682                 return IRQ_HANDLED;
4683
4684         }
4685         else if (!reason) {
4686                 /* The interrupt was not raised by us */
4687                 return IRQ_NONE;
4688         }
4689
4690         return IRQ_HANDLED;
4691 }
4692
4693 /**
4694  * s2io_updt_stats -
4695  */
4696 static void s2io_updt_stats(struct s2io_nic *sp)
4697 {
4698         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4699         u64 val64;
4700         int cnt = 0;
4701
4702         if (is_s2io_card_up(sp)) {
4703                 /* Apprx 30us on a 133 MHz bus */
4704                 val64 = SET_UPDT_CLICKS(10) |
4705                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4706                 writeq(val64, &bar0->stat_cfg);
4707                 do {
4708                         udelay(100);
4709                         val64 = readq(&bar0->stat_cfg);
4710                         if (!(val64 & s2BIT(0)))
4711                                 break;
4712                         cnt++;
4713                         if (cnt == 5)
4714                                 break; /* Updt failed */
4715                 } while(1);
4716         }
4717 }
4718
4719 /**
4720  *  s2io_get_stats - Updates the device statistics structure.
4721  *  @dev : pointer to the device structure.
4722  *  Description:
4723  *  This function updates the device statistics structure in the s2io_nic
4724  *  structure and returns a pointer to the same.
4725  *  Return value:
4726  *  pointer to the updated net_device_stats structure.
4727  */
4728
4729 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4730 {
4731         struct s2io_nic *sp = dev->priv;
4732         struct mac_info *mac_control;
4733         struct config_param *config;
4734
4735
4736         mac_control = &sp->mac_control;
4737         config = &sp->config;
4738
4739         /* Configure Stats for immediate updt */
4740         s2io_updt_stats(sp);
4741
4742         sp->stats.tx_packets =
4743                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4744         sp->stats.tx_errors =
4745                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4746         sp->stats.rx_errors =
4747                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4748         sp->stats.multicast =
4749                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4750         sp->stats.rx_length_errors =
4751                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4752
4753         return (&sp->stats);
4754 }
4755
4756 /**
4757  *  s2io_set_multicast - entry point for multicast address enable/disable.
4758  *  @dev : pointer to the device structure
4759  *  Description:
4760  *  This function is a driver entry point which gets called by the kernel
4761  *  whenever multicast addresses must be enabled/disabled. This also gets
4762  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4763  *  determine, if multicast address must be enabled or if promiscuous mode
4764  *  is to be disabled etc.
4765  *  Return value:
4766  *  void.
4767  */
4768
4769 static void s2io_set_multicast(struct net_device *dev)
4770 {
4771         int i, j, prev_cnt;
4772         struct dev_mc_list *mclist;
4773         struct s2io_nic *sp = dev->priv;
4774         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4775         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4776             0xfeffffffffffULL;
4777         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4778         void __iomem *add;
4779         struct config_param *config = &sp->config;
4780
4781         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4782                 /*  Enable all Multicast addresses */
4783                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4784                        &bar0->rmac_addr_data0_mem);
4785                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4786                        &bar0->rmac_addr_data1_mem);
4787                 val64 = RMAC_ADDR_CMD_MEM_WE |
4788                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4789                     RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4790                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4791                 /* Wait till command completes */
4792                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4793                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4794                                         S2IO_BIT_RESET);
4795
4796                 sp->m_cast_flg = 1;
4797                 sp->all_multi_pos = config->max_mc_addr - 1;
4798         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4799                 /*  Disable all Multicast addresses */
4800                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4801                        &bar0->rmac_addr_data0_mem);
4802                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4803                        &bar0->rmac_addr_data1_mem);
4804                 val64 = RMAC_ADDR_CMD_MEM_WE |
4805                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4806                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4807                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4808                 /* Wait till command completes */
4809                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4810                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4811                                         S2IO_BIT_RESET);
4812
4813                 sp->m_cast_flg = 0;
4814                 sp->all_multi_pos = 0;
4815         }
4816
4817         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4818                 /*  Put the NIC into promiscuous mode */
4819                 add = &bar0->mac_cfg;
4820                 val64 = readq(&bar0->mac_cfg);
4821                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4822
4823                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4824                 writel((u32) val64, add);
4825                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4826                 writel((u32) (val64 >> 32), (add + 4));
4827
4828                 if (vlan_tag_strip != 1) {
4829                         val64 = readq(&bar0->rx_pa_cfg);
4830                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4831                         writeq(val64, &bar0->rx_pa_cfg);
4832                         vlan_strip_flag = 0;
4833                 }
4834
4835                 val64 = readq(&bar0->mac_cfg);
4836                 sp->promisc_flg = 1;
4837                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4838                           dev->name);
4839         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4840                 /*  Remove the NIC from promiscuous mode */
4841                 add = &bar0->mac_cfg;
4842                 val64 = readq(&bar0->mac_cfg);
4843                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4844
4845                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4846                 writel((u32) val64, add);
4847                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4848                 writel((u32) (val64 >> 32), (add + 4));
4849
4850                 if (vlan_tag_strip != 0) {
4851                         val64 = readq(&bar0->rx_pa_cfg);
4852                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4853                         writeq(val64, &bar0->rx_pa_cfg);
4854                         vlan_strip_flag = 1;
4855                 }
4856
4857                 val64 = readq(&bar0->mac_cfg);
4858                 sp->promisc_flg = 0;
4859                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4860                           dev->name);
4861         }
4862
4863         /*  Update individual M_CAST address list */
4864         if ((!sp->m_cast_flg) && dev->mc_count) {
4865                 if (dev->mc_count >
4866                     (config->max_mc_addr - config->max_mac_addr)) {
4867                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4868                                   dev->name);
4869                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4870                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4871                         return;
4872                 }
4873
4874                 prev_cnt = sp->mc_addr_count;
4875                 sp->mc_addr_count = dev->mc_count;
4876
4877                 /* Clear out the previous list of Mc in the H/W. */
4878                 for (i = 0; i < prev_cnt; i++) {
4879                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4880                                &bar0->rmac_addr_data0_mem);
4881                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4882                                 &bar0->rmac_addr_data1_mem);
4883                         val64 = RMAC_ADDR_CMD_MEM_WE |
4884                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4885                             RMAC_ADDR_CMD_MEM_OFFSET
4886                             (config->mc_start_offset + i);
4887                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4888
4889                         /* Wait for command completes */
4890                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4891                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4892                                         S2IO_BIT_RESET)) {
4893                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4894                                           dev->name);
4895                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4896                                 return;
4897                         }
4898                 }
4899
4900                 /* Create the new Rx filter list and update the same in H/W. */
4901                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4902                      i++, mclist = mclist->next) {
4903                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4904                                ETH_ALEN);
4905                         mac_addr = 0;
4906                         for (j = 0; j < ETH_ALEN; j++) {
4907                                 mac_addr |= mclist->dmi_addr[j];
4908                                 mac_addr <<= 8;
4909                         }
4910                         mac_addr >>= 8;
4911                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4912                                &bar0->rmac_addr_data0_mem);
4913                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4914                                 &bar0->rmac_addr_data1_mem);
4915                         val64 = RMAC_ADDR_CMD_MEM_WE |
4916                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4917                             RMAC_ADDR_CMD_MEM_OFFSET
4918                             (i + config->mc_start_offset);
4919                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4920
4921                         /* Wait for command completes */
4922                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4923                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4924                                         S2IO_BIT_RESET)) {
4925                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4926                                           dev->name);
4927                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4928                                 return;
4929                         }
4930                 }
4931         }
4932 }
4933
4934 /* read from CAM unicast & multicast addresses and store it in
4935  * def_mac_addr structure
4936  */
4937 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
4938 {
4939         int offset;
4940         u64 mac_addr = 0x0;
4941         struct config_param *config = &sp->config;
4942
4943         /* store unicast & multicast mac addresses */
4944         for (offset = 0; offset < config->max_mc_addr; offset++) {
4945                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
4946                 /* if read fails disable the entry */
4947                 if (mac_addr == FAILURE)
4948                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
4949                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
4950         }
4951 }
4952
4953 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
4954 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
4955 {
4956         int offset;
4957         struct config_param *config = &sp->config;
4958         /* restore unicast mac address */
4959         for (offset = 0; offset < config->max_mac_addr; offset++)
4960                 do_s2io_prog_unicast(sp->dev,
4961                         sp->def_mac_addr[offset].mac_addr);
4962
4963         /* restore multicast mac address */
4964         for (offset = config->mc_start_offset;
4965                 offset < config->max_mc_addr; offset++)
4966                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
4967 }
4968
4969 /* add a multicast MAC address to CAM */
4970 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
4971 {
4972         int i;
4973         u64 mac_addr = 0;
4974         struct config_param *config = &sp->config;
4975
4976         for (i = 0; i < ETH_ALEN; i++) {
4977                 mac_addr <<= 8;
4978                 mac_addr |= addr[i];
4979         }
4980         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
4981                 return SUCCESS;
4982
4983         /* check if the multicast mac already preset in CAM */
4984         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
4985                 u64 tmp64;
4986                 tmp64 = do_s2io_read_unicast_mc(sp, i);
4987                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
4988                         break;
4989
4990                 if (tmp64 == mac_addr)
4991                         return SUCCESS;
4992         }
4993         if (i == config->max_mc_addr) {
4994                 DBG_PRINT(ERR_DBG,
4995                         "CAM full no space left for multicast MAC\n");
4996                 return FAILURE;
4997         }
4998         /* Update the internal structure with this new mac address */
4999         do_s2io_copy_mac_addr(sp, i, mac_addr);
5000
5001         return (do_s2io_add_mac(sp, mac_addr, i));
5002 }
5003
5004 /* add MAC address to CAM */
5005 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5006 {
5007         u64 val64;
5008         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5009
5010         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5011                 &bar0->rmac_addr_data0_mem);
5012
5013         val64 =
5014                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5015                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5016         writeq(val64, &bar0->rmac_addr_cmd_mem);
5017
5018         /* Wait till command completes */
5019         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5020                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5021                 S2IO_BIT_RESET)) {
5022                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5023                 return FAILURE;
5024         }
5025         return SUCCESS;
5026 }
5027 /* deletes a specified unicast/multicast mac entry from CAM */
5028 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5029 {
5030         int offset;
5031         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5032         struct config_param *config = &sp->config;
5033
5034         for (offset = 1;
5035                 offset < config->max_mc_addr; offset++) {
5036                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5037                 if (tmp64 == addr) {
5038                         /* disable the entry by writing  0xffffffffffffULL */
5039                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5040                                 return FAILURE;
5041                         /* store the new mac list from CAM */
5042                         do_s2io_store_unicast_mc(sp);
5043                         return SUCCESS;
5044                 }
5045         }
5046         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5047                         (unsigned long long)addr);
5048         return FAILURE;
5049 }
5050
/* read mac entries from CAM */
/* Reads the 48-bit MAC address stored at @offset in the RMAC address CAM.
 * The address is returned right-justified (data register keeps it in the
 * upper 48 bits, hence the >> 16).
 * NOTE(review): on timeout this returns FAILURE promoted to u64
 * (all-ones); callers compare only against full addresses or
 * S2IO_DISABLE_MAC_ENTRY — confirm the sentinel cannot collide. */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	/* initializer is a defensive default; overwritten on success */
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 =
		RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	return (tmp64 >> 16);
}
5073
5074 /**
5075  * s2io_set_mac_addr driver entry point
5076  */
5077
5078 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5079 {
5080         struct sockaddr *addr = p;
5081
5082         if (!is_valid_ether_addr(addr->sa_data))
5083                 return -EINVAL;
5084
5085         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5086
5087         /* store the MAC address in CAM */
5088         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5089 }
5090 /**
5091  *  do_s2io_prog_unicast - Programs the Xframe mac address
5092  *  @dev : pointer to the device structure.
5093  *  @addr: a uchar pointer to the new mac address which is to be set.
5094  *  Description : This procedure will program the Xframe to receive
5095  *  frames with new Mac Address
5096  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5097  *  as defined in errno.h file on failure.
5098  */
5099
5100 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5101 {
5102         struct s2io_nic *sp = dev->priv;
5103         register u64 mac_addr = 0, perm_addr = 0;
5104         int i;
5105         u64 tmp64;
5106         struct config_param *config = &sp->config;
5107
5108         /*
5109         * Set the new MAC address as the new unicast filter and reflect this
5110         * change on the device address registered with the OS. It will be
5111         * at offset 0.
5112         */
5113         for (i = 0; i < ETH_ALEN; i++) {
5114                 mac_addr <<= 8;
5115                 mac_addr |= addr[i];
5116                 perm_addr <<= 8;
5117                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5118         }
5119
5120         /* check if the dev_addr is different than perm_addr */
5121         if (mac_addr == perm_addr)
5122                 return SUCCESS;
5123
5124         /* check if the mac already preset in CAM */
5125         for (i = 1; i < config->max_mac_addr; i++) {
5126                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5127                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5128                         break;
5129
5130                 if (tmp64 == mac_addr) {
5131                         DBG_PRINT(INFO_DBG,
5132                         "MAC addr:0x%llx already present in CAM\n",
5133                         (unsigned long long)mac_addr);
5134                         return SUCCESS;
5135                 }
5136         }
5137         if (i == config->max_mac_addr) {
5138                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5139                 return FAILURE;
5140         }
5141         /* Update the internal structure with this new mac address */
5142         do_s2io_copy_mac_addr(sp, i, mac_addr);
5143         return (do_s2io_add_mac(sp, mac_addr, i));
5144 }
5145
5146 /**
5147  * s2io_ethtool_sset - Sets different link parameters.
5148  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5149  * @info: pointer to the structure with parameters given by ethtool to set
5150  * link information.
5151  * Description:
5152  * The function sets different link parameters provided by the user onto
5153  * the NIC.
5154  * Return value:
5155  * 0 on success.
5156 */
5157
5158 static int s2io_ethtool_sset(struct net_device *dev,
5159                              struct ethtool_cmd *info)
5160 {
5161         struct s2io_nic *sp = dev->priv;
5162         if ((info->autoneg == AUTONEG_ENABLE) ||
5163             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5164                 return -EINVAL;
5165         else {
5166                 s2io_close(sp->dev);
5167                 s2io_open(sp->dev);
5168         }
5169
5170         return 0;
5171 }
5172
5173 /**
5174  * s2io_ethtol_gset - Return link specific information.
5175  * @sp : private member of the device structure, pointer to the
5176  *      s2io_nic structure.
5177  * @info : pointer to the structure with parameters given by ethtool
5178  * to return link information.
5179  * Description:
5180  * Returns link specific information like speed, duplex etc.. to ethtool.
5181  * Return value :
5182  * return 0 on success.
5183  */
5184
5185 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5186 {
5187         struct s2io_nic *sp = dev->priv;
5188         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5189         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5190         info->port = PORT_FIBRE;
5191
5192         /* info->transceiver */
5193         info->transceiver = XCVR_EXTERNAL;
5194
5195         if (netif_carrier_ok(sp->dev)) {
5196                 info->speed = 10000;
5197                 info->duplex = DUPLEX_FULL;
5198         } else {
5199                 info->speed = -1;
5200                 info->duplex = -1;
5201         }
5202
5203         info->autoneg = AUTONEG_DISABLE;
5204         return 0;
5205 }
5206
5207 /**
5208  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5209  * @sp : private member of the device structure, which is a pointer to the
5210  * s2io_nic structure.
5211  * @info : pointer to the structure with parameters given by ethtool to
5212  * return driver information.
5213  * Description:
5214  * Returns driver specefic information like name, version etc.. to ethtool.
5215  * Return value:
5216  *  void
5217  */
5218
5219 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5220                                   struct ethtool_drvinfo *info)
5221 {
5222         struct s2io_nic *sp = dev->priv;
5223
5224         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5225         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5226         strncpy(info->fw_version, "", sizeof(info->fw_version));
5227         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5228         info->regdump_len = XENA_REG_SPACE;
5229         info->eedump_len = XENA_EEPROM_SPACE;
5230 }
5231
5232 /**
5233  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5234  *  @sp: private member of the device structure, which is a pointer to the
5235  *  s2io_nic structure.
5236  *  @regs : pointer to the structure with parameters given by ethtool for
5237  *  dumping the registers.
5238  *  @reg_space: The input argumnet into which all the registers are dumped.
5239  *  Description:
5240  *  Dumps the entire register space of xFrame NIC into the user given
5241  *  buffer area.
5242  * Return value :
5243  * void .
5244 */
5245
5246 static void s2io_ethtool_gregs(struct net_device *dev,
5247                                struct ethtool_regs *regs, void *space)
5248 {
5249         int i;
5250         u64 reg;
5251         u8 *reg_space = (u8 *) space;
5252         struct s2io_nic *sp = dev->priv;
5253
5254         regs->len = XENA_REG_SPACE;
5255         regs->version = sp->pdev->subsystem_device;
5256
5257         for (i = 0; i < regs->len; i += 8) {
5258                 reg = readq(sp->bar0 + i);
5259                 memcpy((reg_space + i), &reg, 8);
5260         }
5261 }
5262
5263 /**
5264  *  s2io_phy_id  - timer function that alternates adapter LED.
5265  *  @data : address of the private member of the device structure, which
5266  *  is a pointer to the s2io_nic structure, provided as an u32.
5267  * Description: This is actually the timer function that alternates the
5268  * adapter LED bit of the adapter control bit to set/reset every time on
5269  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5270  *  once every second.
5271 */
5272 static void s2io_phy_id(unsigned long data)
5273 {
5274         struct s2io_nic *sp = (struct s2io_nic *) data;
5275         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5276         u64 val64 = 0;
5277         u16 subid;
5278
5279         subid = sp->pdev->subsystem_device;
5280         if ((sp->device_type == XFRAME_II_DEVICE) ||
5281                    ((subid & 0xFF) >= 0x07)) {
5282                 val64 = readq(&bar0->gpio_control);
5283                 val64 ^= GPIO_CTRL_GPIO_0;
5284                 writeq(val64, &bar0->gpio_control);
5285         } else {
5286                 val64 = readq(&bar0->adapter_control);
5287                 val64 ^= ADAPTER_LED_ON;
5288                 writeq(val64, &bar0->adapter_control);
5289         }
5290
5291         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5292 }
5293
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @id : pointer to the structure with identification parameters given by
 * ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Save the GPIO state so it can be restored after blinking */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I boards drive the LED via adapter_control and
	 * need the adapter enabled (link up) to blink it */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the requested number of seconds (0 => default time);
	 * s2io_phy_id re-arms itself every HZ/2 meanwhile */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original GPIO state on boards with faulty
	 * link indicators */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5346
5347 static void s2io_ethtool_gringparam(struct net_device *dev,
5348                                     struct ethtool_ringparam *ering)
5349 {
5350         struct s2io_nic *sp = dev->priv;
5351         int i,tx_desc_count=0,rx_desc_count=0;
5352
5353         if (sp->rxd_mode == RXD_MODE_1)
5354                 ering->rx_max_pending = MAX_RX_DESC_1;
5355         else if (sp->rxd_mode == RXD_MODE_3B)
5356                 ering->rx_max_pending = MAX_RX_DESC_2;
5357
5358         ering->tx_max_pending = MAX_TX_DESC;
5359         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5360                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5361
5362         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5363         ering->tx_pending = tx_desc_count;
5364         rx_desc_count = 0;
5365         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5366                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5367
5368         ering->rx_pending = rx_desc_count;
5369
5370         ering->rx_mini_max_pending = 0;
5371         ering->rx_mini_pending = 0;
5372         if(sp->rxd_mode == RXD_MODE_1)
5373                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5374         else if (sp->rxd_mode == RXD_MODE_3B)
5375                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5376         ering->rx_jumbo_pending = rx_desc_count;
5377 }
5378
5379 /**
5380  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5381  * @sp : private member of the device structure, which is a pointer to the
5382  *      s2io_nic structure.
5383  * @ep : pointer to the structure with pause parameters given by ethtool.
5384  * Description:
5385  * Returns the Pause frame generation and reception capability of the NIC.
5386  * Return value:
5387  *  void
5388  */
5389 static void s2io_ethtool_getpause_data(struct net_device *dev,
5390                                        struct ethtool_pauseparam *ep)
5391 {
5392         u64 val64;
5393         struct s2io_nic *sp = dev->priv;
5394         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5395
5396         val64 = readq(&bar0->rmac_pause_cfg);
5397         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5398                 ep->tx_pause = TRUE;
5399         if (val64 & RMAC_PAUSE_RX_ENABLE)
5400                 ep->rx_pause = TRUE;
5401         ep->autoneg = FALSE;
5402 }
5403
5404 /**
5405  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5406  * @sp : private member of the device structure, which is a pointer to the
5407  *      s2io_nic structure.
5408  * @ep : pointer to the structure with pause parameters given by ethtool.
5409  * Description:
5410  * It can be used to set or reset Pause frame generation or reception
5411  * support of the NIC.
5412  * Return value:
5413  * int, returns 0 on Success
5414  */
5415
5416 static int s2io_ethtool_setpause_data(struct net_device *dev,
5417                                struct ethtool_pauseparam *ep)
5418 {
5419         u64 val64;
5420         struct s2io_nic *sp = dev->priv;
5421         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5422
5423         val64 = readq(&bar0->rmac_pause_cfg);
5424         if (ep->tx_pause)
5425                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5426         else
5427                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5428         if (ep->rx_pause)
5429                 val64 |= RMAC_PAUSE_RX_ENABLE;
5430         else
5431                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5432         writeq(val64, &bar0->rmac_pause_cfg);
5433         return 0;
5434 }
5435
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 *      offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */

/* I2C device id of the on-board EEPROM (Xframe I path below) */
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM sits behind the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50ms */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM is accessed over the SPI controller */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Kick off the request after programming the command */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for DONE or NACK, up to 5 x 50ms */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only 3 bytes were requested; mask the rest */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5502
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 * Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 * Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write through the I2C controller */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion (up to 5 x 50ms); NACK means the
		 * device rejected the write */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI controller */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* byte count of 8 is encoded as 0 in the SPI command */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Kick off the request after programming the command */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for DONE or NACK, up to 5 x 50ms */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5567 static void s2io_vpd_read(struct s2io_nic *nic)
5568 {
5569         u8 *vpd_data;
5570         u8 data;
5571         int i=0, cnt, fail = 0;
5572         int vpd_addr = 0x80;
5573
5574         if (nic->device_type == XFRAME_II_DEVICE) {
5575                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5576                 vpd_addr = 0x80;
5577         }
5578         else {
5579                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5580                 vpd_addr = 0x50;
5581         }
5582         strcpy(nic->serial_num, "NOT AVAILABLE");
5583
5584         vpd_data = kmalloc(256, GFP_KERNEL);
5585         if (!vpd_data) {
5586                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5587                 return;
5588         }
5589         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5590
5591         for (i = 0; i < 256; i +=4 ) {
5592                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5593                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5594                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5595                 for (cnt = 0; cnt <5; cnt++) {
5596                         msleep(2);
5597                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5598                         if (data == 0x80)
5599                                 break;
5600                 }
5601                 if (cnt >= 5) {
5602                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5603                         fail = 1;
5604                         break;
5605                 }
5606                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5607                                       (u32 *)&vpd_data[i]);
5608         }
5609
5610         if(!fail) {
5611                 /* read serial number of adapter */
5612                 for (cnt = 0; cnt < 256; cnt++) {
5613                 if ((vpd_data[cnt] == 'S') &&
5614                         (vpd_data[cnt+1] == 'N') &&
5615                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5616                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5617                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5618                                         vpd_data[cnt+2]);
5619                                 break;
5620                         }
5621                 }
5622         }
5623
5624         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5625                 memset(nic->product_name, 0, vpd_data[1]);
5626                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5627         }
5628         kfree(vpd_data);
5629         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5630 }
5631
5632 /**
5633  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5634  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5635  *  @eeprom : pointer to the user level structure provided by ethtool,
5636  *  containing all relevant information.
5637  *  @data_buf : user defined value to be written into Eeprom.
5638  *  Description: Reads the values stored in the Eeprom at given offset
5639  *  for a given length. Stores these values int the input argument data
5640  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5641  *  Return value:
5642  *  int  0 on success
5643  */
5644
5645 static int s2io_ethtool_geeprom(struct net_device *dev,
5646                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5647 {
5648         u32 i, valid;
5649         u64 data;
5650         struct s2io_nic *sp = dev->priv;
5651
5652         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5653
5654         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5655                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5656
5657         for (i = 0; i < eeprom->len; i += 4) {
5658                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5659                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5660                         return -EFAULT;
5661                 }
5662                 valid = INV(data);
5663                 memcpy((data_buf + i), &valid, 4);
5664         }
5665         return 0;
5666 }
5667
5668 /**
5669  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5670  *  @sp : private member of the device structure, which is a pointer to the
5671  *  s2io_nic structure.
5672  *  @eeprom : pointer to the user level structure provided by ethtool,
5673  *  containing all relevant information.
5674  *  @data_buf ; user defined value to be written into Eeprom.
5675  *  Description:
5676  *  Tries to write the user provided value in the Eeprom, at the offset
5677  *  given by the user.
5678  *  Return value:
5679  *  0 on success, -EFAULT on failure.
5680  */
5681
5682 static int s2io_ethtool_seeprom(struct net_device *dev,
5683                                 struct ethtool_eeprom *eeprom,
5684                                 u8 * data_buf)
5685 {
5686         int len = eeprom->len, cnt = 0;
5687         u64 valid = 0, data;
5688         struct s2io_nic *sp = dev->priv;
5689
5690         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5691                 DBG_PRINT(ERR_DBG,
5692                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5693                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5694                           eeprom->magic);
5695                 return -EFAULT;
5696         }
5697
5698         while (len) {
5699                 data = (u32) data_buf[cnt] & 0x000000FF;
5700                 if (data) {
5701                         valid = (u32) (data << 24);
5702                 } else
5703                         valid = data;
5704
5705                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5706                         DBG_PRINT(ERR_DBG,
5707                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5708                         DBG_PRINT(ERR_DBG,
5709                                   "write into the specified offset\n");
5710                         return -EFAULT;
5711                 }
5712                 cnt++;
5713                 len--;
5714         }
5715
5716         return 0;
5717 }
5718
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted b
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success.
 */

static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* The swapper feedback register reads back a fixed pattern when
	 * byte ordering is configured correctly */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	/* Power-on default of the pause configuration register */
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* Default Rx queue configuration differs between Xframe I/II */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	/* Power-on default of the XGXS elastic FIFO configuration */
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write tests: xmsi_data is a scratch-safe register; write a
	 * pattern and read it back */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	/* Repeat with the inverted pattern to exercise all bits */
	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	*data = fail;
	return fail;
}
5785
5786 /**
5787  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5788  * @sp : private member of the device structure, which is a pointer to the
5789  * s2io_nic structure.
5790  * @data:variable that returns the result of each of the test conducted by
5791  * the driver.
5792  * Description:
5793  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5794  * register.
5795  * Return value:
5796  * 0 on success.
5797  */
5798
5799 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5800 {
5801         int fail = 0;
5802         u64 ret_data, org_4F0, org_7F0;
5803         u8 saved_4F0 = 0, saved_7F0 = 0;
5804         struct net_device *dev = sp->dev;
5805
5806         /* Test Write Error at offset 0 */
5807         /* Note that SPI interface allows write access to all areas
5808          * of EEPROM. Hence doing all negative testing only for Xframe I.
5809          */
5810         if (sp->device_type == XFRAME_I_DEVICE)
5811                 if (!write_eeprom(sp, 0, 0, 3))
5812                         fail = 1;
5813
5814         /* Save current values at offsets 0x4F0 and 0x7F0 */
5815         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5816                 saved_4F0 = 1;
5817         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5818                 saved_7F0 = 1;
5819
5820         /* Test Write at offset 4f0 */
5821         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5822                 fail = 1;
5823         if (read_eeprom(sp, 0x4F0, &ret_data))
5824                 fail = 1;
5825
5826         if (ret_data != 0x012345) {
5827                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5828                         "Data written %llx Data read %llx\n",
5829                         dev->name, (unsigned long long)0x12345,
5830                         (unsigned long long)ret_data);
5831                 fail = 1;
5832         }
5833
5834         /* Reset the EEPROM data go FFFF */
5835         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5836
5837         /* Test Write Request Error at offset 0x7c */
5838         if (sp->device_type == XFRAME_I_DEVICE)
5839                 if (!write_eeprom(sp, 0x07C, 0, 3))
5840                         fail = 1;
5841
5842         /* Test Write Request at offset 0x7f0 */
5843         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5844                 fail = 1;
5845         if (read_eeprom(sp, 0x7F0, &ret_data))
5846                 fail = 1;
5847
5848         if (ret_data != 0x012345) {
5849                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5850                         "Data written %llx Data read %llx\n",
5851                         dev->name, (unsigned long long)0x12345,
5852                         (unsigned long long)ret_data);
5853                 fail = 1;
5854         }
5855
5856         /* Reset the EEPROM data go FFFF */
5857         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5858
5859         if (sp->device_type == XFRAME_I_DEVICE) {
5860                 /* Test Write Error at offset 0x80 */
5861                 if (!write_eeprom(sp, 0x080, 0, 3))
5862                         fail = 1;
5863
5864                 /* Test Write Error at offset 0xfc */
5865                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5866                         fail = 1;
5867
5868                 /* Test Write Error at offset 0x100 */
5869                 if (!write_eeprom(sp, 0x100, 0, 3))
5870                         fail = 1;
5871
5872                 /* Test Write Error at offset 4ec */
5873                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5874                         fail = 1;
5875         }
5876
5877         /* Restore values at offsets 0x4F0 and 0x7F0 */
5878         if (saved_4F0)
5879                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5880         if (saved_7F0)
5881                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5882
5883         *data = fail;
5884         return fail;
5885 }
5886
5887 /**
5888  * s2io_bist_test - invokes the MemBist test of the card .
5889  * @sp : private member of the device structure, which is a pointer to the
5890  * s2io_nic structure.
5891  * @data:variable that returns the result of each of the test conducted by
5892  * the driver.
5893  * Description:
5894  * This invokes the MemBist test of the card. We give around
5895  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5897  * Return value:
5898  * 0 on success and -1 on failure.
5899  */
5900
5901 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5902 {
5903         u8 bist = 0;
5904         int cnt = 0, ret = -1;
5905
5906         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5907         bist |= PCI_BIST_START;
5908         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5909
5910         while (cnt < 20) {
5911                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5912                 if (!(bist & PCI_BIST_START)) {
5913                         *data = (bist & PCI_BIST_CODE_MASK);
5914                         ret = 0;
5915                         break;
5916                 }
5917                 msleep(100);
5918                 cnt++;
5919         }
5920
5921         return ret;
5922 }
5923
5924 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5927  * s2io_nic structure.
5928  * @data: variable that returns the result of each of the test conducted by
5929  * the driver.
5930  * Description:
5931  * The function verifies the link state of the NIC and updates the input
5932  * argument 'data' appropriately.
5933  * Return value:
5934  * 0 on success.
5935  */
5936
5937 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5938 {
5939         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5940         u64 val64;
5941
5942         val64 = readq(&bar0->adapter_status);
5943         if(!(LINK_IS_UP(val64)))
5944                 *data = 1;
5945         else
5946                 *data = 0;
5947
5948         return *data;
5949 }
5950
5951 /**
5952  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5953  * @sp - private member of the device structure, which is a pointer to the
5954  * s2io_nic structure.
5955  * @data - variable that returns the result of each of the test
5956  * conducted by the driver.
5957  * Description:
5958  *  This is one of the offline test that tests the read and write
5959  *  access to the RldRam chip on the NIC.
5960  * Return value:
5961  *  0 on success.
5962  */
5963
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are not silently corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass flips the upper 48 bits of each
	 * test pattern to exercise both bit polarities.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to 5 * 200ms)
		 * for completion.
		 */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase; abandon the test. */
		if (cnt == 5)
			break;

		/* Kick off the read/verify phase and poll (up to 5 * 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware compares what it read back against the patterns. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6048
6049 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6051  *  @sp : private member of the device structure, which is a pointer to the
6052  *  s2io_nic structure.
6053  *  @ethtest : pointer to a ethtool command specific structure that will be
6054  *  returned to the user.
6055  *  @data : variable that returns the result of each of the test
6056  * conducted by the driver.
6057  * Description:
6058  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6059  *  the health of the card.
6060  * Return value:
6061  *  void
6062  */
6063
6064 static void s2io_ethtool_test(struct net_device *dev,
6065                               struct ethtool_test *ethtest,
6066                               uint64_t * data)
6067 {
6068         struct s2io_nic *sp = dev->priv;
6069         int orig_state = netif_running(sp->dev);
6070
6071         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6072                 /* Offline Tests. */
6073                 if (orig_state)
6074                         s2io_close(sp->dev);
6075
6076                 if (s2io_register_test(sp, &data[0]))
6077                         ethtest->flags |= ETH_TEST_FL_FAILED;
6078
6079                 s2io_reset(sp);
6080
6081                 if (s2io_rldram_test(sp, &data[3]))
6082                         ethtest->flags |= ETH_TEST_FL_FAILED;
6083
6084                 s2io_reset(sp);
6085
6086                 if (s2io_eeprom_test(sp, &data[1]))
6087                         ethtest->flags |= ETH_TEST_FL_FAILED;
6088
6089                 if (s2io_bist_test(sp, &data[4]))
6090                         ethtest->flags |= ETH_TEST_FL_FAILED;
6091
6092                 if (orig_state)
6093                         s2io_open(sp->dev);
6094
6095                 data[2] = 0;
6096         } else {
6097                 /* Online Tests. */
6098                 if (!orig_state) {
6099                         DBG_PRINT(ERR_DBG,
6100                                   "%s: is not up, cannot run test\n",
6101                                   dev->name);
6102                         data[0] = -1;
6103                         data[1] = -1;
6104                         data[2] = -1;
6105                         data[3] = -1;
6106                         data[4] = -1;
6107                 }
6108
6109                 if (s2io_link_test(sp, &data[2]))
6110                         ethtest->flags |= ETH_TEST_FL_FAILED;
6111
6112                 data[0] = 0;
6113                 data[1] = 0;
6114                 data[3] = 0;
6115                 data[4] = 0;
6116         }
6117 }
6118
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* NOTE: entries must be emitted in exactly the order of the
	 * ethtool_xena_stats_keys / ethtool_enhanced_stats_keys /
	 * ethtool_driver_stats_keys string tables returned by
	 * s2io_ethtool_get_strings().
	 */
	s2io_updt_stats(sp);
	/* TMAC (transmit MAC) hardware counters.  32-bit counters are
	 * merged with their overflow words into one 64-bit value.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* RMAC (receive MAC) hardware counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-receive-queue frame and "queue full" counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver-maintained software statistics (native endianness). */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6394
6395 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6396 {
6397         return (XENA_REG_SPACE);
6398 }
6399
6400
6401 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6402 {
6403         struct s2io_nic *sp = dev->priv;
6404
6405         return (sp->rx_csum);
6406 }
6407
6408 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6409 {
6410         struct s2io_nic *sp = dev->priv;
6411
6412         if (data)
6413                 sp->rx_csum = 1;
6414         else
6415                 sp->rx_csum = 0;
6416
6417         return 0;
6418 }
6419
6420 static int s2io_get_eeprom_len(struct net_device *dev)
6421 {
6422         return (XENA_EEPROM_SPACE);
6423 }
6424
6425 static int s2io_get_sset_count(struct net_device *dev, int sset)
6426 {
6427         struct s2io_nic *sp = dev->priv;
6428
6429         switch (sset) {
6430         case ETH_SS_TEST:
6431                 return S2IO_TEST_LEN;
6432         case ETH_SS_STATS:
6433                 switch(sp->device_type) {
6434                 case XFRAME_I_DEVICE:
6435                         return XFRAME_I_STAT_LEN;
6436                 case XFRAME_II_DEVICE:
6437                         return XFRAME_II_STAT_LEN;
6438                 default:
6439                         return 0;
6440                 }
6441         default:
6442                 return -EOPNOTSUPP;
6443         }
6444 }
6445
6446 static void s2io_ethtool_get_strings(struct net_device *dev,
6447                                      u32 stringset, u8 * data)
6448 {
6449         int stat_size = 0;
6450         struct s2io_nic *sp = dev->priv;
6451
6452         switch (stringset) {
6453         case ETH_SS_TEST:
6454                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6455                 break;
6456         case ETH_SS_STATS:
6457                 stat_size = sizeof(ethtool_xena_stats_keys);
6458                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6459                 if(sp->device_type == XFRAME_II_DEVICE) {
6460                         memcpy(data + stat_size,
6461                                 &ethtool_enhanced_stats_keys,
6462                                 sizeof(ethtool_enhanced_stats_keys));
6463                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6464                 }
6465
6466                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6467                         sizeof(ethtool_driver_stats_keys));
6468         }
6469 }
6470
6471 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6472 {
6473         if (data)
6474                 dev->features |= NETIF_F_IP_CSUM;
6475         else
6476                 dev->features &= ~NETIF_F_IP_CSUM;
6477
6478         return 0;
6479 }
6480
6481 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6482 {
6483         return (dev->features & NETIF_F_TSO) != 0;
6484 }
6485 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6486 {
6487         if (data)
6488                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6489         else
6490                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6491
6492         return 0;
6493 }
6494
/* ethtool entry points implemented by this driver; operations not listed
 * here fall back to the ethtool core defaults.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6521
6522 /**
6523  *  s2io_ioctl - Entry point for the Ioctl
6524  *  @dev :  Device pointer.
6525  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6526  *  a proprietary structure used to pass information to the driver.
6527  *  @cmd :  This is used to distinguish between the different commands that
6528  *  can be passed to the IOCTL functions.
6529  *  Description:
6530  *  Currently there are no special functionality supported in IOCTL, hence
6531  *  function always return EOPNOTSUPPORTED
6532  */
6533
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented; reject every request. */
	return -EOPNOTSUPP;
}
6538
6539 /**
6540  *  s2io_change_mtu - entry point to change MTU size for the device.
6541  *   @dev : device pointer.
6542  *   @new_mtu : the new MTU size for the device.
6543  *   Description: A driver entry point to change MTU size for the device.
6544  *   Before changing the MTU the device must be stopped.
6545  *  Return value:
6546  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6547  *   file on failure.
6548  */
6549
6550 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6551 {
6552         struct s2io_nic *sp = dev->priv;
6553         int ret = 0;
6554
6555         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6556                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6557                           dev->name);
6558                 return -EPERM;
6559         }
6560
6561         dev->mtu = new_mtu;
6562         if (netif_running(dev)) {
6563                 s2io_card_down(sp);
6564                 netif_stop_queue(dev);
6565                 ret = s2io_card_up(sp);
6566                 if (ret) {
6567                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6568                                   __FUNCTION__);
6569                         return ret;
6570                 }
6571                 if (netif_queue_stopped(dev))
6572                         netif_wake_queue(dev);
6573         } else { /* Device is down */
6574                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6575                 u64 val64 = new_mtu;
6576
6577                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6578         }
6579
6580         return ret;
6581 }
6582
6583 /**
6584  *  s2io_tasklet - Bottom half of the ISR.
6585  *  @dev_adr : address of the device structure in dma_addr_t format.
6586  *  Description:
6587  *  This is the tasklet or the bottom half of the ISR. This is
6588  *  an extension of the ISR which is scheduled by the scheduler to be run
6589  *  when the load on the CPU is low. All low priority tasks of the ISR can
6590  *  be pushed into the tasklet. For now the tasklet is used only to
6591  *  replenish the Rx buffers in the Rx buffer descriptors.
6592  *  Return value:
6593  *  void.
6594  */
6595
6596 static void s2io_tasklet(unsigned long dev_addr)
6597 {
6598         struct net_device *dev = (struct net_device *) dev_addr;
6599         struct s2io_nic *sp = dev->priv;
6600         int i, ret;
6601         struct mac_info *mac_control;
6602         struct config_param *config;
6603
6604         mac_control = &sp->mac_control;
6605         config = &sp->config;
6606
6607         if (!TASKLET_IN_USE) {
6608                 for (i = 0; i < config->rx_ring_num; i++) {
6609                         ret = fill_rx_buffers(sp, i);
6610                         if (ret == -ENOMEM) {
6611                                 DBG_PRINT(INFO_DBG, "%s: Out of ",
6612                                           dev->name);
6613                                 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6614                                 break;
6615                         } else if (ret == -EFILL) {
6616                                 DBG_PRINT(INFO_DBG,
6617                                           "%s: Rx Ring %d is full\n",
6618                                           dev->name, i);
6619                                 break;
6620                         }
6621                 }
6622                 clear_bit(0, (&sp->tasklet_status));
6623         }
6624 }
6625
/**
 * s2io_set_link - Set the link status
 * @work: work_struct embedded in the s2io_nic whose link state changed
 * Description: Brings the adapter's LED, adapter_control register and
 * software carrier state in sync with the link status reported by the
 * hardware.  Runs from the set_link_task workqueue under rtnl_lock.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	/* Serialize against card reset/teardown via the LINK_TASK bit. */
	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				/* Enable the adapter now that it is quiescent */
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO 0 on
					 * cards with faulty LED wiring; the
					 * readback flushes the posted write. */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6706
/*
 * set_rxd_buffer_pointer - (re)attach data buffers to an Rx descriptor.
 * @sp: private member of the device structure.
 * @rxdp: descriptor whose buffer pointers are being refreshed.
 * @ba: buffer-address bookkeeping for 3-buffer mode (header/dummy bufs).
 * @skb: in/out - skb to reuse, or NULL to allocate a fresh one.
 * @temp0, @temp1, @temp2: in/out caches of the most recently DMA-mapped
 *	addresses; descriptors with a NULL Host_Control reuse them.
 * @size: allocation size for a fresh skb.
 * Description:
 * Helper for rxd_owner_bit_reset(): while the card is being brought
 * down every descriptor must carry a valid buffer, but the frames
 * received into them are never processed, so a single skb/DMA mapping
 * may be shared by many descriptors.
 * Return value: 0 on success, -ENOMEM on allocation or map failure.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously mapped addresses. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* Unwind the Buffer2 mapping made above. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	/* Common failure path: account the loss and free the skb. */
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6814
6815 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6816                                 int size)
6817 {
6818         struct net_device *dev = sp->dev;
6819         if (sp->rxd_mode == RXD_MODE_1) {
6820                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6821         } else if (sp->rxd_mode == RXD_MODE_3B) {
6822                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6823                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6824                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6825         }
6826 }
6827
6828 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6829 {
6830         int i, j, k, blk_cnt = 0, size;
6831         struct mac_info * mac_control = &sp->mac_control;
6832         struct config_param *config = &sp->config;
6833         struct net_device *dev = sp->dev;
6834         struct RxD_t *rxdp = NULL;
6835         struct sk_buff *skb = NULL;
6836         struct buffAdd *ba = NULL;
6837         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6838
6839         /* Calculate the size based on ring mode */
6840         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6841                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6842         if (sp->rxd_mode == RXD_MODE_1)
6843                 size += NET_IP_ALIGN;
6844         else if (sp->rxd_mode == RXD_MODE_3B)
6845                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6846
6847         for (i = 0; i < config->rx_ring_num; i++) {
6848                 blk_cnt = config->rx_cfg[i].num_rxd /
6849                         (rxd_count[sp->rxd_mode] +1);
6850
6851                 for (j = 0; j < blk_cnt; j++) {
6852                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6853                                 rxdp = mac_control->rings[i].
6854                                         rx_blocks[j].rxds[k].virt_addr;
6855                                 if(sp->rxd_mode == RXD_MODE_3B)
6856                                         ba = &mac_control->rings[i].ba[j][k];
6857                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6858                                                        &skb,(u64 *)&temp0_64,
6859                                                        (u64 *)&temp1_64,
6860                                                        (u64 *)&temp2_64,
6861                                                         size) == ENOMEM) {
6862                                         return 0;
6863                                 }
6864
6865                                 set_rxd_buffer_size(sp, rxdp, size);
6866                                 wmb();
6867                                 /* flip the Ownership bit to Hardware */
6868                                 rxdp->Control_1 |= RXD_OWN_XENA;
6869                         }
6870                 }
6871         }
6872         return 0;
6873
6874 }
6875
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: private member of the device structure.
 * Description:
 * Tries to enable MSI-X when configured, stores the MSI-X table
 * contents, then registers one handler per MSI-X vector (Tx fifo or
 * Rx ring).  Any failure along the MSI-X path falls back to a single
 * shared INTA handler.
 * Return value: 0 on success, -1 if INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Scan entries from index 1 until the first one not
		 * flagged in-use (index 0 presumably holds the general
		 * alarm vector - confirm against s2io_enable_msi_x()). */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* Unwind every vector registered so far and
				 * fall back to legacy INTA. */
				remove_msix_isr(sp);
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
						 dev->name);
				sp->config.intr_type = INTA;
				break;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
				msix_tx_cnt);
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				msix_rx_cnt);
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6963 static void s2io_rem_isr(struct s2io_nic * sp)
6964 {
6965         if (sp->config.intr_type == MSI_X)
6966                 remove_msix_isr(sp);
6967         else
6968                 remove_inta_isr(sp);
6969 }
6970
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: private member of the device structure.
 * @do_io: when non-zero also touch the hardware (stop Tx/Rx traffic,
 *	wait for the device to go quiescent and reset it); when zero
 *	only the software state (timer, napi, ISRs, tasklet, buffers)
 *	is torn down.
 * Description:
 * Counterpart of s2io_card_up().  Serializes with the link task via
 * the __S2IO_STATE_LINK_TASK bit and leaves that bit cleared on exit.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (config->napi)
		napi_disable(&sp->napi);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point
		 * we are just setting the ownership bit of rxd in each
		 * Rx ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Poll every 50 ms; give up after 10 tries (~500 ms). */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7043
/* Bring the card down including hardware I/O (stop, quiesce, reset);
 * see do_s2io_card_down() for the software-only (do_io == 0) variant. */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7048
/*
 * s2io_card_up - bring the adapter up, ready for Tx/Rx.
 * @sp: private member of the device structure.
 * Description:
 * Initializes the H/W registers, replenishes every Rx ring, enables
 * napi, restores the receive mode, starts the NIC, registers the
 * ISR(s), arms the alarm timer and tasklet, and finally enables the
 * selected interrupts and marks the card up.
 * Return value: 0 on success; a negative errno on failure (every
 * failure path resets the NIC and/or frees the Rx buffers as needed).
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the hardware is gone; skip the reset then. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}

	/* Initialise napi */
	if (config->napi)
		napi_enable(&sp->napi);

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7144
7145 /**
7146  * s2io_restart_nic - Resets the NIC.
7147  * @data : long pointer to the device private structure
7148  * Description:
7149  * This function is scheduled to be run by the s2io_tx_watchdog
7150  * function after 0.5 secs to reset the NIC. The idea is to reduce
7151  * the run time of the watch dog routine which is run holding a
7152  * spin lock.
7153  */
7154
7155 static void s2io_restart_nic(struct work_struct *work)
7156 {
7157         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7158         struct net_device *dev = sp->dev;
7159
7160         rtnl_lock();
7161
7162         if (!netif_running(dev))
7163                 goto out_unlock;
7164
7165         s2io_card_down(sp);
7166         if (s2io_card_up(sp)) {
7167                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7168                           dev->name);
7169         }
7170         netif_wake_queue(dev);
7171         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7172                   dev->name);
7173 out_unlock:
7174         rtnl_unlock();
7175 }
7176
7177 /**
7178  *  s2io_tx_watchdog - Watchdog for transmit side.
7179  *  @dev : Pointer to net device structure
7180  *  Description:
7181  *  This function is triggered if the Tx Queue is stopped
7182  *  for a pre-defined amount of time when the Interface is still up.
7183  *  If the Interface is jammed in such a situation, the hardware is
7184  *  reset (by s2io_close) and restarted again (by s2io_open) to
7185  *  overcome any problem that might have been caused in the hardware.
7186  *  Return value:
7187  *  void
7188  */
7189
7190 static void s2io_tx_watchdog(struct net_device *dev)
7191 {
7192         struct s2io_nic *sp = dev->priv;
7193
7194         if (netif_carrier_ok(dev)) {
7195                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7196                 schedule_work(&sp->rst_timer_task);
7197                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7198         }
7199 }
7200
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @sp: private member of the device structure, pointer to s2io_nic structure.
 *   @skb : the socket buffer pointer.
 *   @len : length of the packet
 *   @cksum : FCS checksum of the frame.
 *   @ring_no : the ring from which this RxD was extracted.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, if so adds it to the
 *   SKB's cksum variable, increments the Rx packet count and passes the SKB
 *   to the upper layer. If the checksum is wrong, it increments the Rx
 *   packet error count, frees the SKB and returns error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
7218 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7219 {
7220         struct s2io_nic *sp = ring_data->nic;
7221         struct net_device *dev = (struct net_device *) sp->dev;
7222         struct sk_buff *skb = (struct sk_buff *)
7223                 ((unsigned long) rxdp->Host_Control);
7224         int ring_no = ring_data->ring_no;
7225         u16 l3_csum, l4_csum;
7226         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7227         struct lro *lro;
7228         u8 err_mask;
7229
7230         skb->dev = dev;
7231
7232         if (err) {
7233                 /* Check for parity error */
7234                 if (err & 0x1) {
7235                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7236                 }
7237                 err_mask = err >> 48;
7238                 switch(err_mask) {
7239                         case 1:
7240                                 sp->mac_control.stats_info->sw_stat.
7241                                 rx_parity_err_cnt++;
7242                         break;
7243
7244                         case 2:
7245                                 sp->mac_control.stats_info->sw_stat.
7246                                 rx_abort_cnt++;
7247                         break;
7248
7249                         case 3:
7250                                 sp->mac_control.stats_info->sw_stat.
7251                                 rx_parity_abort_cnt++;
7252                         break;
7253
7254                         case 4:
7255                                 sp->mac_control.stats_info->sw_stat.
7256                                 rx_rda_fail_cnt++;
7257                         break;
7258
7259                         case 5:
7260                                 sp->mac_control.stats_info->sw_stat.
7261                                 rx_unkn_prot_cnt++;
7262                         break;
7263
7264                         case 6:
7265                                 sp->mac_control.stats_info->sw_stat.
7266                                 rx_fcs_err_cnt++;
7267                         break;
7268
7269                         case 7:
7270                                 sp->mac_control.stats_info->sw_stat.
7271                                 rx_buf_size_err_cnt++;
7272                         break;
7273
7274                         case 8:
7275                                 sp->mac_control.stats_info->sw_stat.
7276                                 rx_rxd_corrupt_cnt++;
7277                         break;
7278
7279                         case 15:
7280                                 sp->mac_control.stats_info->sw_stat.
7281                                 rx_unkn_err_cnt++;
7282                         break;
7283                 }
7284                 /*
7285                 * Drop the packet if bad transfer code. Exception being
7286                 * 0x5, which could be due to unsupported IPv6 extension header.
7287                 * In this case, we let stack handle the packet.
7288                 * Note that in this case, since checksum will be incorrect,
7289                 * stack will validate the same.
7290                 */
7291                 if (err_mask != 0x5) {
7292                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7293                                 dev->name, err_mask);
7294                         sp->stats.rx_crc_errors++;
7295                         sp->mac_control.stats_info->sw_stat.mem_freed
7296                                 += skb->truesize;
7297                         dev_kfree_skb(skb);
7298                         atomic_dec(&sp->rx_bufs_left[ring_no]);
7299                         rxdp->Host_Control = 0;
7300                         return 0;
7301                 }
7302         }
7303
7304         /* Updating statistics */
7305         sp->stats.rx_packets++;
7306         rxdp->Host_Control = 0;
7307         if (sp->rxd_mode == RXD_MODE_1) {
7308                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7309
7310                 sp->stats.rx_bytes += len;
7311                 skb_put(skb, len);
7312
7313         } else if (sp->rxd_mode == RXD_MODE_3B) {
7314                 int get_block = ring_data->rx_curr_get_info.block_index;
7315                 int get_off = ring_data->rx_curr_get_info.offset;
7316                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7317                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7318                 unsigned char *buff = skb_push(skb, buf0_len);
7319
7320                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7321                 sp->stats.rx_bytes += buf0_len + buf2_len;
7322                 memcpy(buff, ba->ba_0, buf0_len);
7323                 skb_put(skb, buf2_len);
7324         }
7325
7326         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7327             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7328             (sp->rx_csum)) {
7329                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7330                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7331                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7332                         /*
7333                          * NIC verifies if the Checksum of the received
7334                          * frame is Ok or not and accordingly returns
7335                          * a flag in the RxD.
7336                          */
7337                         skb->ip_summed = CHECKSUM_UNNECESSARY;
7338                         if (sp->lro) {
7339                                 u32 tcp_len;
7340                                 u8 *tcp;
7341                                 int ret = 0;
7342
7343                                 ret = s2io_club_tcp_session(skb->data, &tcp,
7344                                                             &tcp_len, &lro,
7345                                                             rxdp, sp);
7346                                 switch (ret) {
7347                                         case 3: /* Begin anew */
7348                                                 lro->parent = skb;
7349                                                 goto aggregate;
7350                                         case 1: /* Aggregate */
7351                                         {
7352                                                 lro_append_pkt(sp, lro,
7353                                                         skb, tcp_len);
7354                                                 goto aggregate;
7355                                         }
7356                                         case 4: /* Flush session */
7357                                         {
7358                                                 lro_append_pkt(sp, lro,
7359                                                         skb, tcp_len);
7360                                                 queue_rx_frame(lro->parent);
7361                                                 clear_lro_session(lro);
7362                                                 sp->mac_control.stats_info->
7363                                                     sw_stat.flush_max_pkts++;
7364                                                 goto aggregate;
7365                                         }
7366                                         case 2: /* Flush both */
7367                                                 lro->parent->data_len =
7368                                                         lro->frags_len;
7369                                                 sp->mac_control.stats_info->
7370                                                      sw_stat.sending_both++;
7371                                                 queue_rx_frame(lro->parent);
7372                                                 clear_lro_session(lro);
7373                                                 goto send_up;
7374                                         case 0: /* sessions exceeded */
7375                                         case -1: /* non-TCP or not
7376                                                   * L2 aggregatable
7377                                                   */
7378                                         case 5: /*
7379                                                  * First pkt in session not
7380                                                  * L3/L4 aggregatable
7381                                                  */
7382                                                 break;
7383                                         default:
7384                                                 DBG_PRINT(ERR_DBG,
7385                                                         "%s: Samadhana!!\n",
7386                                                          __FUNCTION__);
7387                                                 BUG();
7388                                 }
7389                         }
7390                 } else {
7391                         /*
7392                          * Packet with erroneous checksum, let the
7393                          * upper layers deal with it.
7394                          */
7395                         skb->ip_summed = CHECKSUM_NONE;
7396                 }
7397         } else {
7398                 skb->ip_summed = CHECKSUM_NONE;
7399         }
7400         sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7401         if (!sp->lro) {
7402                 skb->protocol = eth_type_trans(skb, dev);
7403                 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7404                         vlan_strip_flag)) {
7405                         /* Queueing the vlan frame to the upper layer */
7406                         if (napi)
7407                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7408                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
7409                         else
7410                                 vlan_hwaccel_rx(skb, sp->vlgrp,
7411                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
7412                 } else {
7413                         if (napi)
7414                                 netif_receive_skb(skb);
7415                         else
7416                                 netif_rx(skb);
7417                 }
7418         } else {
7419 send_up:
7420                 queue_rx_frame(skb);
7421         }
7422         dev->last_rx = jiffies;
7423 aggregate:
7424         atomic_dec(&sp->rx_bufs_left[ring_no]);
7425         return SUCCESS;
7426 }
7427
7428 /**
7429  *  s2io_link - stops/starts the Tx queue.
7430  *  @sp : private member of the device structure, which is a pointer to the
7431  *  s2io_nic structure.
7432  *  @link : inidicates whether link is UP/DOWN.
7433  *  Description:
7434  *  This function stops/starts the Tx queue depending on whether the link
7435  *  status of the NIC is is down or up. This is called by the Alarm
7436  *  interrupt handler whenever a link change interrupt comes up.
7437  *  Return value:
7438  *  void.
7439  */
7440
7441 static void s2io_link(struct s2io_nic * sp, int link)
7442 {
7443         struct net_device *dev = (struct net_device *) sp->dev;
7444
7445         if (link != sp->last_link_state) {
7446                 if (link == LINK_DOWN) {
7447                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7448                         netif_carrier_off(dev);
7449                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7450                         sp->mac_control.stats_info->sw_stat.link_up_time =
7451                                 jiffies - sp->start_time;
7452                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7453                 } else {
7454                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7455                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7456                         sp->mac_control.stats_info->sw_stat.link_down_time =
7457                                 jiffies - sp->start_time;
7458                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7459                         netif_carrier_on(dev);
7460                 }
7461         }
7462         sp->last_link_state = link;
7463         sp->start_time = jiffies;
7464 }
7465
7466 /**
7467  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7468  *  @sp : private member of the device structure, which is a pointer to the
7469  *  s2io_nic structure.
7470  *  Description:
7471  *  This function initializes a few of the PCI and PCI-X configuration registers
7472  *  with recommended values.
7473  *  Return value:
7474  *  void
7475  */
7476
7477 static void s2io_init_pci(struct s2io_nic * sp)
7478 {
7479         u16 pci_cmd = 0, pcix_cmd = 0;
7480
7481         /* Enable Data Parity Error Recovery in PCI-X command register. */
7482         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7483                              &(pcix_cmd));
7484         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7485                               (pcix_cmd | 1));
7486         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7487                              &(pcix_cmd));
7488
7489         /* Set the PErr Response bit in PCI command register. */
7490         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7491         pci_write_config_word(sp->pdev, PCI_COMMAND,
7492                               (pci_cmd | PCI_COMMAND_PARITY));
7493         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7494 }
7495
7496 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7497 {
7498         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7499                 (tx_fifo_num < FIFO_DEFAULT_NUM)) {
7500                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7501                         "(%d) not supported\n", tx_fifo_num);
7502                 tx_fifo_num =
7503                         ((tx_fifo_num > MAX_TX_FIFOS)? MAX_TX_FIFOS :
7504                         ((tx_fifo_num < FIFO_DEFAULT_NUM) ? FIFO_DEFAULT_NUM :
7505                         tx_fifo_num));
7506                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7507                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7508         }
7509
7510         if ( rx_ring_num > 8) {
7511                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7512                          "supported\n");
7513                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7514                 rx_ring_num = 8;
7515         }
7516         if (*dev_intr_type != INTA)
7517                 napi = 0;
7518
7519         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7520                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7521                           "Defaulting to INTA\n");
7522                 *dev_intr_type = INTA;
7523         }
7524
7525         if ((*dev_intr_type == MSI_X) &&
7526                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7527                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7528                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7529                                         "Defaulting to INTA\n");
7530                 *dev_intr_type = INTA;
7531         }
7532
7533         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7534                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7535                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7536                 rx_ring_mode = 1;
7537         }
7538         return SUCCESS;
7539 }
7540
7541 /**
7542  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7543  * or Traffic class respectively.
7544  * @nic: device peivate variable
7545  * Description: The function configures the receive steering to
7546  * desired receive ring.
7547  * Return Value:  SUCCESS on success and
7548  * '-1' on failure (endian settings incorrect).
7549  */
7550 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7551 {
7552         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7553         register u64 val64 = 0;
7554
7555         if (ds_codepoint > 63)
7556                 return FAILURE;
7557
7558         val64 = RTS_DS_MEM_DATA(ring);
7559         writeq(val64, &bar0->rts_ds_mem_data);
7560
7561         val64 = RTS_DS_MEM_CTRL_WE |
7562                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7563                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7564
7565         writeq(val64, &bar0->rts_ds_mem_ctrl);
7566
7567         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7568                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7569                                 S2IO_BIT_RESET);
7570 }
7571
/**
 *  s2io_init_nic - Initialization of the adapter .
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	DECLARE_MAC_BUF(mac);

	/* Sanitize module parameters before touching the hardware. */
	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else bail out. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
					consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*  Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc device IDs identify an Xframe II adapter. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	/* Use per-list Tx interrupts if any fifo is too short for
	 * utilization-based interrupts.
	 */
	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/*  initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	/* Map BAR0 (device registers) and BAR1 (Tx FIFO doorbells). */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
				     pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/*  Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->set_mac_address = &s2io_set_mac_addr;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 *
	 * NOTE(review): the two lines above look stale --
	 * dev->set_mac_address is assigned s2io_set_mac_addr above;
	 * confirm against git history.
	 */
	netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO is only advertised on Xframe II when enabled by module param. */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 6 MAC address bytes from the two 32-bit halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	 /* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	sp->tasklet_status = 0;
	sp->state = 0;

	/* Initialize spinlocks -- one Tx lock per configured fifo. */
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		spin_lock_init(&mac_control->fifos[i].tx_lock);

	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
		  dev->name, print_mac(mac, dev->dev_addr));
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
		case RXD_MODE_1:
		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
						dev->name);
		    break;
		case RXD_MODE_3B:
		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
						dev->name);
		    break;
	}

	if (napi)
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
	switch(sp->config.intr_type) {
		case INTA:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		    break;
		case MSI_X:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		    break;
	}
	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
					" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwind: labels fall through, releasing resources in
	 * reverse order of acquisition.
	 */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
7988
7989 /**
7990  * s2io_rem_nic - Free the PCI device
7991  * @pdev: structure containing the PCI related information of the device.
7992  * Description: This function is called by the Pci subsystem to release a
7993  * PCI device and free up all resource held up by the device. This could
7994  * be in response to a Hot plug event or when the driver is to be removed
7995  * from memory.
7996  */
7997
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
        struct net_device *dev =
            (struct net_device *) pci_get_drvdata(pdev);
        struct s2io_nic *sp;

        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
                return;
        }

        /* Let any scheduled deferred work (link/reset tasks) finish before
         * tearing the device down underneath it. */
        flush_scheduled_work();

        sp = dev->priv;
        /* Detach from the network stack first so no new I/O can start;
         * the remaining teardown below must stay in this order (free
         * resources, drop drvdata, free the netdev, then disable PCI). */
        unregister_netdev(dev);

        free_shared_mem(sp);
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        pci_disable_device(pdev);
}
8022
8023 /**
8024  * s2io_starter - Entry point for the driver
8025  * Description: This function is the entry point for the driver. It verifies
8026  * the module loadable parameters and initializes PCI configuration space.
8027  */
8028
8029 static int __init s2io_starter(void)
8030 {
8031         return pci_register_driver(&s2io_driver);
8032 }
8033
8034 /**
8035  * s2io_closer - Cleanup routine for the driver
8036  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8037  */
8038
static __exit void s2io_closer(void)
{
        /* Detaches every device bound to this driver; per-device teardown
         * runs through the driver's remove callback (s2io_rem_nic). */
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
8047
8048 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8049                 struct tcphdr **tcp, struct RxD_t *rxdp)
8050 {
8051         int ip_off;
8052         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8053
8054         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8055                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8056                           __FUNCTION__);
8057                 return -1;
8058         }
8059
8060         /* TODO:
8061          * By default the VLAN field in the MAC is stripped by the card, if this
8062          * feature is turned off in rx_pa_cfg register, then the ip_off field
8063          * has to be shifted by a further 2 bytes
8064          */
8065         switch (l2_type) {
8066                 case 0: /* DIX type */
8067                 case 4: /* DIX type with VLAN */
8068                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8069                         break;
8070                 /* LLC, SNAP etc are considered non-mergeable */
8071                 default:
8072                         return -1;
8073         }
8074
8075         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8076         ip_len = (u8)((*ip)->ihl);
8077         ip_len <<= 2;
8078         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8079
8080         return 0;
8081 }
8082
8083 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8084                                   struct tcphdr *tcp)
8085 {
8086         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8087         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8088            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8089                 return -1;
8090         return 0;
8091 }
8092
8093 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8094 {
8095         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8096 }
8097
8098 static void initiate_new_session(struct lro *lro, u8 *l2h,
8099                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
8100 {
8101         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8102         lro->l2h = l2h;
8103         lro->iph = ip;
8104         lro->tcph = tcp;
8105         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8106         lro->tcp_ack = ntohl(tcp->ack_seq);
8107         lro->sg_num = 1;
8108         lro->total_len = ntohs(ip->tot_len);
8109         lro->frags_len = 0;
8110         /*
8111          * check if we saw TCP timestamp. Other consistency checks have
8112          * already been done.
8113          */
8114         if (tcp->doff == 8) {
8115                 u32 *ptr;
8116                 ptr = (u32 *)(tcp+1);
8117                 lro->saw_ts = 1;
8118                 lro->cur_tsval = *(ptr+1);
8119                 lro->cur_tsecr = *(ptr+2);
8120         }
8121         lro->in_use = 1;
8122 }
8123
8124 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8125 {
8126         struct iphdr *ip = lro->iph;
8127         struct tcphdr *tcp = lro->tcph;
8128         __sum16 nchk;
8129         struct stat_block *statinfo = sp->mac_control.stats_info;
8130         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8131
8132         /* Update L3 header */
8133         ip->tot_len = htons(lro->total_len);
8134         ip->check = 0;
8135         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8136         ip->check = nchk;
8137
8138         /* Update L4 header */
8139         tcp->ack_seq = lro->tcp_ack;
8140         tcp->window = lro->window;
8141
8142         /* Update tsecr field if this session has timestamps enabled */
8143         if (lro->saw_ts) {
8144                 u32 *ptr = (u32 *)(tcp + 1);
8145                 *(ptr+2) = lro->cur_tsecr;
8146         }
8147
8148         /* Update counters required for calculation of
8149          * average no. of packets aggregated.
8150          */
8151         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8152         statinfo->sw_stat.num_aggregations++;
8153 }
8154
8155 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8156                 struct tcphdr *tcp, u32 l4_pyld)
8157 {
8158         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8159         lro->total_len += l4_pyld;
8160         lro->frags_len += l4_pyld;
8161         lro->tcp_next_seq += l4_pyld;
8162         lro->sg_num++;
8163
8164         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8165         lro->tcp_ack = tcp->ack_seq;
8166         lro->window = tcp->window;
8167
8168         if (lro->saw_ts) {
8169                 u32 *ptr;
8170                 /* Update tsecr and tsval from this packet */
8171                 ptr = (u32 *) (tcp + 1);
8172                 lro->cur_tsval = *(ptr + 1);
8173                 lro->cur_tsecr = *(ptr + 2);
8174         }
8175 }
8176
8177 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8178                                     struct tcphdr *tcp, u32 tcp_pyld_len)
8179 {
8180         u8 *ptr;
8181
8182         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8183
8184         if (!tcp_pyld_len) {
8185                 /* Runt frame or a pure ack */
8186                 return -1;
8187         }
8188
8189         if (ip->ihl != 5) /* IP has options */
8190                 return -1;
8191
8192         /* If we see CE codepoint in IP header, packet is not mergeable */
8193         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8194                 return -1;
8195
8196         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8197         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8198                                     tcp->ece || tcp->cwr || !tcp->ack) {
8199                 /*
8200                  * Currently recognize only the ack control word and
8201                  * any other control field being set would result in
8202                  * flushing the LRO session
8203                  */
8204                 return -1;
8205         }
8206
8207         /*
8208          * Allow only one TCP timestamp option. Don't aggregate if
8209          * any other options are detected.
8210          */
8211         if (tcp->doff != 5 && tcp->doff != 8)
8212                 return -1;
8213
8214         if (tcp->doff == 8) {
8215                 ptr = (u8 *)(tcp + 1);
8216                 while (*ptr == TCPOPT_NOP)
8217                         ptr++;
8218                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8219                         return -1;
8220
8221                 /* Ensure timestamp value increases monotonically */
8222                 if (l_lro)
8223                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
8224                                 return -1;
8225
8226                 /* timestamp echo reply should be non-zero */
8227                 if (*((u32 *)(ptr+6)) == 0)
8228                         return -1;
8229         }
8230
8231         return 0;
8232 }
8233
8234 static int
8235 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8236                       struct RxD_t *rxdp, struct s2io_nic *sp)
8237 {
8238         struct iphdr *ip;
8239         struct tcphdr *tcph;
8240         int ret = 0, i;
8241
8242         if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8243                                          rxdp))) {
8244                 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8245                           ip->saddr, ip->daddr);
8246         } else {
8247                 return ret;
8248         }
8249
8250         tcph = (struct tcphdr *)*tcp;
8251         *tcp_len = get_l4_pyld_length(ip, tcph);
8252         for (i=0; i<MAX_LRO_SESSIONS; i++) {
8253                 struct lro *l_lro = &sp->lro0_n[i];
8254                 if (l_lro->in_use) {
8255                         if (check_for_socket_match(l_lro, ip, tcph))
8256                                 continue;
8257                         /* Sock pair matched */
8258                         *lro = l_lro;
8259
8260                         if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8261                                 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8262                                           "0x%x, actual 0x%x\n", __FUNCTION__,
8263                                           (*lro)->tcp_next_seq,
8264                                           ntohl(tcph->seq));
8265
8266                                 sp->mac_control.stats_info->
8267                                    sw_stat.outof_sequence_pkts++;
8268                                 ret = 2;
8269                                 break;
8270                         }
8271
8272                         if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8273                                 ret = 1; /* Aggregate */
8274                         else
8275                                 ret = 2; /* Flush both */
8276                         break;
8277                 }
8278         }
8279
8280         if (ret == 0) {
8281                 /* Before searching for available LRO objects,
8282                  * check if the pkt is L3/L4 aggregatable. If not
8283                  * don't create new LRO session. Just send this
8284                  * packet up.
8285                  */
8286                 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8287                         return 5;
8288                 }
8289
8290                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8291                         struct lro *l_lro = &sp->lro0_n[i];
8292                         if (!(l_lro->in_use)) {
8293                                 *lro = l_lro;
8294                                 ret = 3; /* Begin anew */
8295                                 break;
8296                         }
8297                 }
8298         }
8299
8300         if (ret == 0) { /* sessions exceeded */
8301                 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8302                           __FUNCTION__);
8303                 *lro = NULL;
8304                 return ret;
8305         }
8306
8307         switch (ret) {
8308                 case 3:
8309                         initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8310                         break;
8311                 case 2:
8312                         update_L3L4_header(sp, *lro);
8313                         break;
8314                 case 1:
8315                         aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8316                         if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8317                                 update_L3L4_header(sp, *lro);
8318                                 ret = 4; /* Flush the LRO */
8319                         }
8320                         break;
8321                 default:
8322                         DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8323                                 __FUNCTION__);
8324                         break;
8325         }
8326
8327         return ret;
8328 }
8329
8330 static void clear_lro_session(struct lro *lro)
8331 {
8332         static u16 lro_struct_size = sizeof(struct lro);
8333
8334         memset(lro, 0, lro_struct_size);
8335 }
8336
8337 static void queue_rx_frame(struct sk_buff *skb)
8338 {
8339         struct net_device *dev = skb->dev;
8340
8341         skb->protocol = eth_type_trans(skb, dev);
8342         if (napi)
8343                 netif_receive_skb(skb);
8344         else
8345                 netif_rx(skb);
8346 }
8347
8348 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8349                            struct sk_buff *skb,
8350                            u32 tcp_len)
8351 {
8352         struct sk_buff *first = lro->parent;
8353
8354         first->len += tcp_len;
8355         first->data_len = lro->frags_len;
8356         skb_pull(skb, (skb->len - tcp_len));
8357         if (skb_shinfo(first)->frag_list)
8358                 lro->last_frag->next = skb;
8359         else
8360                 skb_shinfo(first)->frag_list = skb;
8361         first->truesize += skb->truesize;
8362         lro->last_frag = skb;
8363         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8364         return;
8365 }
8366
8367 /**
8368  * s2io_io_error_detected - called when PCI error is detected
8369  * @pdev: Pointer to PCI device
8370  * @state: The current pci connection state
8371  *
8372  * This function is called after a PCI bus error affecting
8373  * this device has been detected.
8374  */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev->priv;

        /* Keep the stack from touching the device while recovery runs. */
        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                /* Bring down the card, while avoiding PCI I/O */
                do_s2io_card_down(sp, 0);
        }
        pci_disable_device(pdev);

        /* Ask the PCI error-recovery core for a slot reset; recovery
         * continues in s2io_io_slot_reset(). */
        return PCI_ERS_RESULT_NEED_RESET;
}
8391
8392 /**
8393  * s2io_io_slot_reset - called after the pci bus has been reset.
8394  * @pdev: Pointer to PCI device
8395  *
8396  * Restart the card from scratch, as if from a cold-boot.
8397  * At this point, the card has exprienced a hard reset,
8398  * followed by fixups by BIOS, and has its config space
8399  * set up identically to what it was at cold boot.
8400  */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev->priv;

        if (pci_enable_device(pdev)) {
                printk(KERN_ERR "s2io: "
                       "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* Restore bus mastering and put the adapter back into a known
         * state; traffic is restarted later in s2io_io_resume(). */
        pci_set_master(pdev);
        s2io_reset(sp);

        return PCI_ERS_RESULT_RECOVERED;
}
8417
8418 /**
8419  * s2io_io_resume - called when traffic can start flowing again.
8420  * @pdev: Pointer to PCI device
8421  *
8422  * This callback is called when the error recovery driver tells
8423  * us that its OK to resume normal operation.
8424  */
8425 static void s2io_io_resume(struct pci_dev *pdev)
8426 {
8427         struct net_device *netdev = pci_get_drvdata(pdev);
8428         struct s2io_nic *sp = netdev->priv;
8429
8430         if (netif_running(netdev)) {
8431                 if (s2io_card_up(sp)) {
8432                         printk(KERN_ERR "s2io: "
8433                                "Can't bring device back up after reset.\n");
8434                         return;
8435                 }
8436
8437                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8438                         s2io_card_down(sp);
8439                         printk(KERN_ERR "s2io: "
8440                                "Can't resetore mac addr after reset.\n");
8441                         return;
8442                 }
8443         }
8444
8445         netif_device_attach(netdev);
8446         netif_wake_queue(netdev);
8447 }