s2io.c: Shorten code line length by using intermediate pointers
[safe/jmp/linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <net/tcp.h>
79
80 #include <asm/system.h>
81 #include <asm/uaccess.h>
82 #include <asm/io.h>
83 #include <asm/div64.h>
84 #include <asm/irq.h>
85
86 /* local include */
87 #include "s2io.h"
88 #include "s2io-regs.h"
89
90 #define DRV_VERSION "2.0.26.25"
91
92 /* S2io Driver name & version. */
93 static char s2io_driver_name[] = "Neterion";
94 static char s2io_driver_version[] = DRV_VERSION;
95
/*
 * Per-rxd-mode lookup tables, indexed by nic->rxd_mode:
 * rxd_size  - size of one Rx descriptor in bytes,
 * rxd_count - number of RxDs that fit in one Rx block.
 * Presumably index 0 is 1-buffer mode and index 1 is 3-buffer mode;
 * confirm against the RXD_MODE_* definitions in s2io.h.
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
98
99 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 {
101         int ret;
102
103         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
104                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
105
106         return ret;
107 }
108
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Every argument and the whole expansion are parenthesized so the
 * macros stay correct when used inside larger expressions or with
 * expression arguments.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up only when neither remote-fault nor local-fault is flagged. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
121
/* Return non-zero when the __S2IO_STATE_CARD_UP bit is set in sp->state,
 * i.e. the adapter has been brought up and not yet torn down. */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
126
127 /* Ethtool related variables and Macros. */
128 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
129         "Register test\t(offline)",
130         "Eeprom test\t(offline)",
131         "Link test\t(online)",
132         "RLDRAM test\t(offline)",
133         "BIST Test\t(offline)"
134 };
135
136 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
137         {"tmac_frms"},
138         {"tmac_data_octets"},
139         {"tmac_drop_frms"},
140         {"tmac_mcst_frms"},
141         {"tmac_bcst_frms"},
142         {"tmac_pause_ctrl_frms"},
143         {"tmac_ttl_octets"},
144         {"tmac_ucst_frms"},
145         {"tmac_nucst_frms"},
146         {"tmac_any_err_frms"},
147         {"tmac_ttl_less_fb_octets"},
148         {"tmac_vld_ip_octets"},
149         {"tmac_vld_ip"},
150         {"tmac_drop_ip"},
151         {"tmac_icmp"},
152         {"tmac_rst_tcp"},
153         {"tmac_tcp"},
154         {"tmac_udp"},
155         {"rmac_vld_frms"},
156         {"rmac_data_octets"},
157         {"rmac_fcs_err_frms"},
158         {"rmac_drop_frms"},
159         {"rmac_vld_mcst_frms"},
160         {"rmac_vld_bcst_frms"},
161         {"rmac_in_rng_len_err_frms"},
162         {"rmac_out_rng_len_err_frms"},
163         {"rmac_long_frms"},
164         {"rmac_pause_ctrl_frms"},
165         {"rmac_unsup_ctrl_frms"},
166         {"rmac_ttl_octets"},
167         {"rmac_accepted_ucst_frms"},
168         {"rmac_accepted_nucst_frms"},
169         {"rmac_discarded_frms"},
170         {"rmac_drop_events"},
171         {"rmac_ttl_less_fb_octets"},
172         {"rmac_ttl_frms"},
173         {"rmac_usized_frms"},
174         {"rmac_osized_frms"},
175         {"rmac_frag_frms"},
176         {"rmac_jabber_frms"},
177         {"rmac_ttl_64_frms"},
178         {"rmac_ttl_65_127_frms"},
179         {"rmac_ttl_128_255_frms"},
180         {"rmac_ttl_256_511_frms"},
181         {"rmac_ttl_512_1023_frms"},
182         {"rmac_ttl_1024_1518_frms"},
183         {"rmac_ip"},
184         {"rmac_ip_octets"},
185         {"rmac_hdr_err_ip"},
186         {"rmac_drop_ip"},
187         {"rmac_icmp"},
188         {"rmac_tcp"},
189         {"rmac_udp"},
190         {"rmac_err_drp_udp"},
191         {"rmac_xgmii_err_sym"},
192         {"rmac_frms_q0"},
193         {"rmac_frms_q1"},
194         {"rmac_frms_q2"},
195         {"rmac_frms_q3"},
196         {"rmac_frms_q4"},
197         {"rmac_frms_q5"},
198         {"rmac_frms_q6"},
199         {"rmac_frms_q7"},
200         {"rmac_full_q0"},
201         {"rmac_full_q1"},
202         {"rmac_full_q2"},
203         {"rmac_full_q3"},
204         {"rmac_full_q4"},
205         {"rmac_full_q5"},
206         {"rmac_full_q6"},
207         {"rmac_full_q7"},
208         {"rmac_pause_cnt"},
209         {"rmac_xgmii_data_err_cnt"},
210         {"rmac_xgmii_ctrl_err_cnt"},
211         {"rmac_accepted_ip"},
212         {"rmac_err_tcp"},
213         {"rd_req_cnt"},
214         {"new_rd_req_cnt"},
215         {"new_rd_req_rtry_cnt"},
216         {"rd_rtry_cnt"},
217         {"wr_rtry_rd_ack_cnt"},
218         {"wr_req_cnt"},
219         {"new_wr_req_cnt"},
220         {"new_wr_req_rtry_cnt"},
221         {"wr_rtry_cnt"},
222         {"wr_disc_cnt"},
223         {"rd_rtry_wr_ack_cnt"},
224         {"txp_wr_cnt"},
225         {"txd_rd_cnt"},
226         {"txd_wr_cnt"},
227         {"rxd_rd_cnt"},
228         {"rxd_wr_cnt"},
229         {"txf_rd_cnt"},
230         {"rxf_wr_cnt"}
231 };
232
233 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
234         {"rmac_ttl_1519_4095_frms"},
235         {"rmac_ttl_4096_8191_frms"},
236         {"rmac_ttl_8192_max_frms"},
237         {"rmac_ttl_gt_max_frms"},
238         {"rmac_osized_alt_frms"},
239         {"rmac_jabber_alt_frms"},
240         {"rmac_gt_max_alt_frms"},
241         {"rmac_vlan_frms"},
242         {"rmac_len_discard"},
243         {"rmac_fcs_discard"},
244         {"rmac_pf_discard"},
245         {"rmac_da_discard"},
246         {"rmac_red_discard"},
247         {"rmac_rts_discard"},
248         {"rmac_ingm_full_discard"},
249         {"link_fault_cnt"}
250 };
251
252 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
253         {"\n DRIVER STATISTICS"},
254         {"single_bit_ecc_errs"},
255         {"double_bit_ecc_errs"},
256         {"parity_err_cnt"},
257         {"serious_err_cnt"},
258         {"soft_reset_cnt"},
259         {"fifo_full_cnt"},
260         {"ring_0_full_cnt"},
261         {"ring_1_full_cnt"},
262         {"ring_2_full_cnt"},
263         {"ring_3_full_cnt"},
264         {"ring_4_full_cnt"},
265         {"ring_5_full_cnt"},
266         {"ring_6_full_cnt"},
267         {"ring_7_full_cnt"},
268         {"alarm_transceiver_temp_high"},
269         {"alarm_transceiver_temp_low"},
270         {"alarm_laser_bias_current_high"},
271         {"alarm_laser_bias_current_low"},
272         {"alarm_laser_output_power_high"},
273         {"alarm_laser_output_power_low"},
274         {"warn_transceiver_temp_high"},
275         {"warn_transceiver_temp_low"},
276         {"warn_laser_bias_current_high"},
277         {"warn_laser_bias_current_low"},
278         {"warn_laser_output_power_high"},
279         {"warn_laser_output_power_low"},
280         {"lro_aggregated_pkts"},
281         {"lro_flush_both_count"},
282         {"lro_out_of_sequence_pkts"},
283         {"lro_flush_due_to_max_pkts"},
284         {"lro_avg_aggr_pkts"},
285         {"mem_alloc_fail_cnt"},
286         {"pci_map_fail_cnt"},
287         {"watchdog_timer_cnt"},
288         {"mem_allocated"},
289         {"mem_freed"},
290         {"link_up_cnt"},
291         {"link_down_cnt"},
292         {"link_up_time"},
293         {"link_down_time"},
294         {"tx_tcode_buf_abort_cnt"},
295         {"tx_tcode_desc_abort_cnt"},
296         {"tx_tcode_parity_err_cnt"},
297         {"tx_tcode_link_loss_cnt"},
298         {"tx_tcode_list_proc_err_cnt"},
299         {"rx_tcode_parity_err_cnt"},
300         {"rx_tcode_abort_cnt"},
301         {"rx_tcode_parity_abort_cnt"},
302         {"rx_tcode_rda_fail_cnt"},
303         {"rx_tcode_unkn_prot_cnt"},
304         {"rx_tcode_fcs_err_cnt"},
305         {"rx_tcode_buf_size_err_cnt"},
306         {"rx_tcode_rxd_corrupt_cnt"},
307         {"rx_tcode_unkn_err_cnt"},
308         {"tda_err_cnt"},
309         {"pfc_err_cnt"},
310         {"pcc_err_cnt"},
311         {"tti_err_cnt"},
312         {"tpa_err_cnt"},
313         {"sm_err_cnt"},
314         {"lso_err_cnt"},
315         {"mac_tmac_err_cnt"},
316         {"mac_rmac_err_cnt"},
317         {"xgxs_txgxs_err_cnt"},
318         {"xgxs_rxgxs_err_cnt"},
319         {"rc_err_cnt"},
320         {"prc_pcix_err_cnt"},
321         {"rpa_err_cnt"},
322         {"rda_err_cnt"},
323         {"rti_err_cnt"},
324         {"mc_err_cnt"}
325 };
326
327 #define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
328 #define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
329 #define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)
330
331 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
332 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
333
334 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
335 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
336
#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
/* Parenthesized so the macro survives surrounding operators intact. */
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
339
/*
 * Initialize and arm a kernel timer in one step.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement inside un-braced if/else bodies; the caller's
 * trailing semicolon completes the statement as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
345
346 /* copy mac addr to def_mac_addr array */
347 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 {
349         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
350         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
351         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
352         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
353         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
354         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
355 }
356
357 /* Add the vlan */
358 static void s2io_vlan_rx_register(struct net_device *dev,
359                                   struct vlan_group *grp)
360 {
361         int i;
362         struct s2io_nic *nic = netdev_priv(dev);
363         unsigned long flags[MAX_TX_FIFOS];
364         struct mac_info *mac_control = &nic->mac_control;
365         struct config_param *config = &nic->config;
366
367         for (i = 0; i < config->tx_fifo_num; i++) {
368                 struct fifo_info *fifo = &mac_control->fifos[i];
369
370                 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
371         }
372
373         nic->vlgrp = grp;
374
375         for (i = config->tx_fifo_num - 1; i >= 0; i--) {
376                 struct fifo_info *fifo = &mac_control->fifos[i];
377
378                 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
379         }
380 }
381
382 /* Unregister the vlan */
383 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
384 {
385         int i;
386         struct s2io_nic *nic = netdev_priv(dev);
387         unsigned long flags[MAX_TX_FIFOS];
388         struct mac_info *mac_control = &nic->mac_control;
389         struct config_param *config = &nic->config;
390
391         for (i = 0; i < config->tx_fifo_num; i++) {
392                 struct fifo_info *fifo = &mac_control->fifos[i];
393
394                 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
395         }
396
397         if (nic->vlgrp)
398                 vlan_group_set_device(nic->vlgrp, vid, NULL);
399
400         for (i = config->tx_fifo_num - 1; i >= 0; i--) {
401                 struct fifo_info *fifo = &mac_control->fifos[i];
402
403                 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
404         }
405 }
406
407 /*
408  * Constants to be programmed into the Xena's registers, to configure
409  * the XAUI.
410  */
411
412 #define END_SIGN        0x0
413 static const u64 herc_act_dtx_cfg[] = {
414         /* Set address */
415         0x8000051536750000ULL, 0x80000515367500E0ULL,
416         /* Write data */
417         0x8000051536750004ULL, 0x80000515367500E4ULL,
418         /* Set address */
419         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
420         /* Write data */
421         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
422         /* Set address */
423         0x801205150D440000ULL, 0x801205150D4400E0ULL,
424         /* Write data */
425         0x801205150D440004ULL, 0x801205150D4400E4ULL,
426         /* Set address */
427         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
428         /* Write data */
429         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
430         /* Done */
431         END_SIGN
432 };
433
434 static const u64 xena_dtx_cfg[] = {
435         /* Set address */
436         0x8000051500000000ULL, 0x80000515000000E0ULL,
437         /* Write data */
438         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
439         /* Set address */
440         0x8001051500000000ULL, 0x80010515000000E0ULL,
441         /* Write data */
442         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
443         /* Set address */
444         0x8002051500000000ULL, 0x80020515000000E0ULL,
445         /* Write data */
446         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
447         END_SIGN
448 };
449
450 /*
451  * Constants for Fixing the MacAddress problem seen mostly on
452  * Alpha machines.
453  */
454 static const u64 fix_mac[] = {
455         0x0060000000000000ULL, 0x0060600000000000ULL,
456         0x0040600000000000ULL, 0x0000600000000000ULL,
457         0x0020600000000000ULL, 0x0060600000000000ULL,
458         0x0020600000000000ULL, 0x0060600000000000ULL,
459         0x0020600000000000ULL, 0x0060600000000000ULL,
460         0x0020600000000000ULL, 0x0060600000000000ULL,
461         0x0020600000000000ULL, 0x0060600000000000ULL,
462         0x0020600000000000ULL, 0x0060600000000000ULL,
463         0x0020600000000000ULL, 0x0060600000000000ULL,
464         0x0020600000000000ULL, 0x0060600000000000ULL,
465         0x0020600000000000ULL, 0x0060600000000000ULL,
466         0x0020600000000000ULL, 0x0060600000000000ULL,
467         0x0020600000000000ULL, 0x0000600000000000ULL,
468         0x0040600000000000ULL, 0x0060600000000000ULL,
469         END_SIGN
470 };
471
472 MODULE_LICENSE("GPL");
473 MODULE_VERSION(DRV_VERSION);
474
475
476 /* Module Loadable parameters. */
477 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
478 S2IO_PARM_INT(rx_ring_num, 1);
479 S2IO_PARM_INT(multiq, 0);
480 S2IO_PARM_INT(rx_ring_mode, 1);
481 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
482 S2IO_PARM_INT(rmac_pause_time, 0x100);
483 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
484 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
485 S2IO_PARM_INT(shared_splits, 0);
486 S2IO_PARM_INT(tmac_util_period, 5);
487 S2IO_PARM_INT(rmac_util_period, 5);
488 S2IO_PARM_INT(l3l4hdr_size, 128);
489 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
490 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
491 /* Frequency of Rx desc syncs expressed as power of 2 */
492 S2IO_PARM_INT(rxsync_frequency, 3);
493 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
494 S2IO_PARM_INT(intr_type, 2);
495 /* Large receive offload feature */
496 static unsigned int lro_enable;
497 module_param_named(lro, lro_enable, uint, 0);
498
499 /* Max pkts to be aggregated by LRO at one time. If not specified,
500  * aggregation happens until we hit max IP pkt size(64K)
501  */
502 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
503 S2IO_PARM_INT(indicate_max_pkts, 0);
504
505 S2IO_PARM_INT(napi, 1);
506 S2IO_PARM_INT(ufo, 0);
507 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
508
509 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
510     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
511 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
512     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
513 static unsigned int rts_frm_len[MAX_RX_RINGS] =
514     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
515
516 module_param_array(tx_fifo_len, uint, NULL, 0);
517 module_param_array(rx_ring_sz, uint, NULL, 0);
518 module_param_array(rts_frm_len, uint, NULL, 0);
519
520 /*
521  * S2IO device table.
522  * This table lists all the devices that this driver supports.
523  */
524 static struct pci_device_id s2io_tbl[] __devinitdata = {
525         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
526          PCI_ANY_ID, PCI_ANY_ID},
527         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
528          PCI_ANY_ID, PCI_ANY_ID},
529         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
530          PCI_ANY_ID, PCI_ANY_ID},
531         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
532          PCI_ANY_ID, PCI_ANY_ID},
533         {0,}
534 };
535
536 MODULE_DEVICE_TABLE(pci, s2io_tbl);
537
538 static struct pci_error_handlers s2io_err_handler = {
539         .error_detected = s2io_io_error_detected,
540         .slot_reset = s2io_io_slot_reset,
541         .resume = s2io_io_resume,
542 };
543
544 static struct pci_driver s2io_driver = {
545       .name = "S2IO",
546       .id_table = s2io_tbl,
547       .probe = s2io_init_nic,
548       .remove = __devexit_p(s2io_rem_nic),
549       .err_handler = &s2io_err_handler,
550 };
551
/* A simplifier macro used both by init and free shared_mem Fns():
 * number of pages needed to hold 'len' items at 'per_each' items per
 * page (ceiling division).  Arguments are parenthesized so expression
 * arguments such as 2*2 expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
554
555 /* netqueue manipulation helper functions */
556 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
557 {
558         if (!sp->config.multiq) {
559                 int i;
560
561                 for (i = 0; i < sp->config.tx_fifo_num; i++)
562                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
563         }
564         netif_tx_stop_all_queues(sp->dev);
565 }
566
567 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
568 {
569         if (!sp->config.multiq)
570                 sp->mac_control.fifos[fifo_no].queue_state =
571                         FIFO_QUEUE_STOP;
572
573         netif_tx_stop_all_queues(sp->dev);
574 }
575
576 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
577 {
578         if (!sp->config.multiq) {
579                 int i;
580
581                 for (i = 0; i < sp->config.tx_fifo_num; i++)
582                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
583         }
584         netif_tx_start_all_queues(sp->dev);
585 }
586
587 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
588 {
589         if (!sp->config.multiq)
590                 sp->mac_control.fifos[fifo_no].queue_state =
591                         FIFO_QUEUE_START;
592
593         netif_tx_start_all_queues(sp->dev);
594 }
595
596 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
597 {
598         if (!sp->config.multiq) {
599                 int i;
600
601                 for (i = 0; i < sp->config.tx_fifo_num; i++)
602                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603         }
604         netif_tx_wake_all_queues(sp->dev);
605 }
606
607 static inline void s2io_wake_tx_queue(
608         struct fifo_info *fifo, int cnt, u8 multiq)
609 {
610
611         if (multiq) {
612                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615                 if (netif_queue_stopped(fifo->dev)) {
616                         fifo->queue_state = FIFO_QUEUE_START;
617                         netif_wake_queue(fifo->dev);
618                 }
619         }
620 }
621
622 /**
623  * init_shared_mem - Allocation and Initialization of Memory
624  * @nic: Device private variable.
625  * Description: The function allocates all the memory areas shared
626  * between the NIC and the driver. This includes Tx descriptors,
627  * Rx descriptors and the statistics block.
628  */
629
630 static int init_shared_mem(struct s2io_nic *nic)
631 {
632         u32 size;
633         void *tmp_v_addr, *tmp_v_addr_next;
634         dma_addr_t tmp_p_addr, tmp_p_addr_next;
635         struct RxD_block *pre_rxd_blk = NULL;
636         int i, j, blk_cnt;
637         int lst_size, lst_per_page;
638         struct net_device *dev = nic->dev;
639         unsigned long tmp;
640         struct buffAdd *ba;
641
642         struct mac_info *mac_control;
643         struct config_param *config;
644         unsigned long long mem_allocated = 0;
645
646         mac_control = &nic->mac_control;
647         config = &nic->config;
648
649         /* Allocation and initialization of TXDLs in FIFOs */
650         size = 0;
651         for (i = 0; i < config->tx_fifo_num; i++) {
652                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
653
654                 size += tx_cfg->fifo_len;
655         }
656         if (size > MAX_AVAILABLE_TXDS) {
657                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
658                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
659                 return -EINVAL;
660         }
661
662         size = 0;
663         for (i = 0; i < config->tx_fifo_num; i++) {
664                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
665
666                 size = tx_cfg->fifo_len;
667                 /*
668                  * Legal values are from 2 to 8192
669                  */
670                 if (size < 2) {
671                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
672                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
673                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
674                                 "are 2 to 8192\n");
675                         return -EINVAL;
676                 }
677         }
678
679         lst_size = (sizeof(struct TxD) * config->max_txds);
680         lst_per_page = PAGE_SIZE / lst_size;
681
682         for (i = 0; i < config->tx_fifo_num; i++) {
683                 struct fifo_info *fifo = &mac_control->fifos[i];
684                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
685                 int fifo_len = tx_cfg->fifo_len;
686                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
687
688                 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
689                 if (!fifo->list_info) {
690                         DBG_PRINT(INFO_DBG,
691                                   "Malloc failed for list_info\n");
692                         return -ENOMEM;
693                 }
694                 mem_allocated += list_holder_size;
695         }
696         for (i = 0; i < config->tx_fifo_num; i++) {
697                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
698                                                 lst_per_page);
699                 struct fifo_info *fifo = &mac_control->fifos[i];
700                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
701
702                 fifo->tx_curr_put_info.offset = 0;
703                 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
704                 fifo->tx_curr_get_info.offset = 0;
705                 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
706                 fifo->fifo_no = i;
707                 fifo->nic = nic;
708                 fifo->max_txds = MAX_SKB_FRAGS + 2;
709                 fifo->dev = dev;
710
711                 for (j = 0; j < page_num; j++) {
712                         int k = 0;
713                         dma_addr_t tmp_p;
714                         void *tmp_v;
715                         tmp_v = pci_alloc_consistent(nic->pdev,
716                                                      PAGE_SIZE, &tmp_p);
717                         if (!tmp_v) {
718                                 DBG_PRINT(INFO_DBG,
719                                           "pci_alloc_consistent ");
720                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
721                                 return -ENOMEM;
722                         }
723                         /* If we got a zero DMA address(can happen on
724                          * certain platforms like PPC), reallocate.
725                          * Store virtual address of page we don't want,
726                          * to be freed later.
727                          */
728                         if (!tmp_p) {
729                                 mac_control->zerodma_virt_addr = tmp_v;
730                                 DBG_PRINT(INIT_DBG,
731                                 "%s: Zero DMA address for TxDL. ", dev->name);
732                                 DBG_PRINT(INIT_DBG,
733                                 "Virtual address %p\n", tmp_v);
734                                 tmp_v = pci_alloc_consistent(nic->pdev,
735                                                      PAGE_SIZE, &tmp_p);
736                                 if (!tmp_v) {
737                                         DBG_PRINT(INFO_DBG,
738                                           "pci_alloc_consistent ");
739                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
740                                         return -ENOMEM;
741                                 }
742                                 mem_allocated += PAGE_SIZE;
743                         }
744                         while (k < lst_per_page) {
745                                 int l = (j * lst_per_page) + k;
746                                 if (l == tx_cfg->fifo_len)
747                                         break;
748                                 fifo->list_info[l].list_virt_addr =
749                                     tmp_v + (k * lst_size);
750                                 fifo->list_info[l].list_phy_addr =
751                                     tmp_p + (k * lst_size);
752                                 k++;
753                         }
754                 }
755         }
756
757         for (i = 0; i < config->tx_fifo_num; i++) {
758                 struct fifo_info *fifo = &mac_control->fifos[i];
759                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
760
761                 size = tx_cfg->fifo_len;
762                 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
763                 if (!fifo->ufo_in_band_v)
764                         return -ENOMEM;
765                 mem_allocated += (size * sizeof(u64));
766         }
767
768         /* Allocation and initialization of RXDs in Rings */
769         size = 0;
770         for (i = 0; i < config->rx_ring_num; i++) {
771                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
772                 struct ring_info *ring = &mac_control->rings[i];
773
774                 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
775                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
776                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
777                         DBG_PRINT(ERR_DBG, "RxDs per Block");
778                         return FAILURE;
779                 }
780                 size += rx_cfg->num_rxd;
781                 ring->block_count = rx_cfg->num_rxd /
782                         (rxd_count[nic->rxd_mode] + 1 );
783                 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
784         }
785         if (nic->rxd_mode == RXD_MODE_1)
786                 size = (size * (sizeof(struct RxD1)));
787         else
788                 size = (size * (sizeof(struct RxD3)));
789
790         for (i = 0; i < config->rx_ring_num; i++) {
791                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
792                 struct ring_info *ring = &mac_control->rings[i];
793
794                 ring->rx_curr_get_info.block_index = 0;
795                 ring->rx_curr_get_info.offset = 0;
796                 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
797                 ring->rx_curr_put_info.block_index = 0;
798                 ring->rx_curr_put_info.offset = 0;
799                 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
800                 ring->nic = nic;
801                 ring->ring_no = i;
802                 ring->lro = lro_enable;
803
804                 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
805                 /*  Allocating all the Rx blocks */
806                 for (j = 0; j < blk_cnt; j++) {
807                         struct rx_block_info *rx_blocks;
808                         int l;
809
810                         rx_blocks = &ring->rx_blocks[j];
811                         size = SIZE_OF_BLOCK; //size is always page size
812                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
813                                                           &tmp_p_addr);
814                         if (tmp_v_addr == NULL) {
815                                 /*
816                                  * In case of failure, free_shared_mem()
817                                  * is called, which should free any
818                                  * memory that was alloced till the
819                                  * failure happened.
820                                  */
821                                 rx_blocks->block_virt_addr = tmp_v_addr;
822                                 return -ENOMEM;
823                         }
824                         mem_allocated += size;
825                         memset(tmp_v_addr, 0, size);
826                         rx_blocks->block_virt_addr = tmp_v_addr;
827                         rx_blocks->block_dma_addr = tmp_p_addr;
828                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
829                                                   rxd_count[nic->rxd_mode],
830                                                   GFP_KERNEL);
831                         if (!rx_blocks->rxds)
832                                 return -ENOMEM;
833                         mem_allocated +=
834                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
835                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
836                                 rx_blocks->rxds[l].virt_addr =
837                                         rx_blocks->block_virt_addr +
838                                         (rxd_size[nic->rxd_mode] * l);
839                                 rx_blocks->rxds[l].dma_addr =
840                                         rx_blocks->block_dma_addr +
841                                         (rxd_size[nic->rxd_mode] * l);
842                         }
843                 }
844                 /* Interlinking all Rx Blocks */
845                 for (j = 0; j < blk_cnt; j++) {
846                         int next = (j + 1) % blk_cnt;
847                         tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
848                         tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
849                         tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
850                         tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
851
852                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
853                         pre_rxd_blk->reserved_2_pNext_RxD_block =
854                             (unsigned long) tmp_v_addr_next;
855                         pre_rxd_blk->pNext_RxD_Blk_physical =
856                             (u64) tmp_p_addr_next;
857                 }
858         }
859         if (nic->rxd_mode == RXD_MODE_3B) {
860                 /*
861                  * Allocation of Storages for buffer addresses in 2BUFF mode
862                  * and the buffers as well.
863                  */
864                 for (i = 0; i < config->rx_ring_num; i++) {
865                         struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
866                         struct ring_info *ring = &mac_control->rings[i];
867
868                         blk_cnt = rx_cfg->num_rxd /
869                                 (rxd_count[nic->rxd_mode]+ 1);
870                         ring->ba = kmalloc((sizeof(struct buffAdd *) * blk_cnt),
871                                            GFP_KERNEL);
872                         if (!ring->ba)
873                                 return -ENOMEM;
874                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
875                         for (j = 0; j < blk_cnt; j++) {
876                                 int k = 0;
877                                 ring->ba[j] =
878                                         kmalloc((sizeof(struct buffAdd) *
879                                                 (rxd_count[nic->rxd_mode] + 1)),
880                                                 GFP_KERNEL);
881                                 if (!ring->ba[j])
882                                         return -ENOMEM;
883                                 mem_allocated += (sizeof(struct buffAdd) *  \
884                                         (rxd_count[nic->rxd_mode] + 1));
885                                 while (k != rxd_count[nic->rxd_mode]) {
886                                         ba = &ring->ba[j][k];
887
888                                         ba->ba_0_org = (void *) kmalloc
889                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
890                                         if (!ba->ba_0_org)
891                                                 return -ENOMEM;
892                                         mem_allocated +=
893                                                 (BUF0_LEN + ALIGN_SIZE);
894                                         tmp = (unsigned long)ba->ba_0_org;
895                                         tmp += ALIGN_SIZE;
896                                         tmp &= ~((unsigned long) ALIGN_SIZE);
897                                         ba->ba_0 = (void *) tmp;
898
899                                         ba->ba_1_org = (void *) kmalloc
900                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
901                                         if (!ba->ba_1_org)
902                                                 return -ENOMEM;
903                                         mem_allocated
904                                                 += (BUF1_LEN + ALIGN_SIZE);
905                                         tmp = (unsigned long) ba->ba_1_org;
906                                         tmp += ALIGN_SIZE;
907                                         tmp &= ~((unsigned long) ALIGN_SIZE);
908                                         ba->ba_1 = (void *) tmp;
909                                         k++;
910                                 }
911                         }
912                 }
913         }
914
915         /* Allocation and initialization of Statistics block */
916         size = sizeof(struct stat_block);
917         mac_control->stats_mem = pci_alloc_consistent
918             (nic->pdev, size, &mac_control->stats_mem_phy);
919
920         if (!mac_control->stats_mem) {
921                 /*
922                  * In case of failure, free_shared_mem() is called, which
923                  * should free any memory that was alloced till the
924                  * failure happened.
925                  */
926                 return -ENOMEM;
927         }
928         mem_allocated += size;
929         mac_control->stats_mem_sz = size;
930
931         tmp_v_addr = mac_control->stats_mem;
932         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
933         memset(tmp_v_addr, 0, size);
934         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
935                   (unsigned long long) tmp_p_addr);
936         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
937         return SUCCESS;
938 }
939
940 /**
941  * free_shared_mem - Free the allocated Memory
942  * @nic:  Device private variable.
943  * Description: This function is to free all memory locations allocated by
944  * the init_shared_mem() function and return it to the kernel.
945  */
946
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* TxDL geometry: how many descriptor lists fit in one DMA page.
	 * Must mirror the layout used by init_shared_mem().
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-fifo TxDL pages and the list_info bookkeeping array */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* list_info was never allocated: nothing else to
			 * free for any fifo, bail out entirely.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks the first unallocated page;
			 * allocation stopped here, so freeing stops too.
			 */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(fifo->list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Free the Rx block DMA buffers and their rxd pointer tables */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			/* NULL marks where block allocation stopped */
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* ba_0_org/ba_1_org are the unaligned
					 * kmalloc originals; the aligned ba_0/
					 * ba_1 views are not freed separately.
					 */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(ring->ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(ring->ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Free the per-fifo UFO in-band scratch arrays */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (tx_cfg->fifo_len * sizeof(u64));
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Statistics block goes last: the sw_stat counters updated above
	 * live inside it.
	 */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1082
1083 /**
1084  * s2io_verify_pci_mode -
1085  */
1086
1087 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1088 {
1089         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1090         register u64 val64 = 0;
1091         int     mode;
1092
1093         val64 = readq(&bar0->pci_mode);
1094         mode = (u8)GET_PCI_MODE(val64);
1095
1096         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1097                 return -1;      /* Unknown PCI mode */
1098         return mode;
1099 }
1100
1101 #define NEC_VENID   0x1033
1102 #define NEC_DEVID   0x0125
1103 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1104 {
1105         struct pci_dev *tdev = NULL;
1106         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1107                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1108                         if (tdev->bus == s2io_pdev->bus->parent) {
1109                                 pci_dev_put(tdev);
1110                                 return 1;
1111                         }
1112                 }
1113         }
1114         return 0;
1115 }
1116
1117 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1118 /**
1119  * s2io_print_pci_mode -
1120  */
1121 static int s2io_print_pci_mode(struct s2io_nic *nic)
1122 {
1123         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1124         register u64 val64 = 0;
1125         int     mode;
1126         struct config_param *config = &nic->config;
1127
1128         val64 = readq(&bar0->pci_mode);
1129         mode = (u8)GET_PCI_MODE(val64);
1130
1131         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1132                 return -1;      /* Unknown PCI mode */
1133
1134         config->bus_speed = bus_speed[mode];
1135
1136         if (s2io_on_nec_bridge(nic->pdev)) {
1137                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1138                                                         nic->dev->name);
1139                 return mode;
1140         }
1141
1142         if (val64 & PCI_MODE_32_BITS) {
1143                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1144         } else {
1145                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1146         }
1147
1148         switch(mode) {
1149                 case PCI_MODE_PCI_33:
1150                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1151                         break;
1152                 case PCI_MODE_PCI_66:
1153                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1154                         break;
1155                 case PCI_MODE_PCIX_M1_66:
1156                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1157                         break;
1158                 case PCI_MODE_PCIX_M1_100:
1159                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1160                         break;
1161                 case PCI_MODE_PCIX_M1_133:
1162                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1163                         break;
1164                 case PCI_MODE_PCIX_M2_66:
1165                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1166                         break;
1167                 case PCI_MODE_PCIX_M2_100:
1168                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1169                         break;
1170                 case PCI_MODE_PCIX_M2_133:
1171                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1172                         break;
1173                 default:
1174                         return -1;      /* Unsupported bus speed */
1175         }
1176
1177         return mode;
1178 }
1179
1180 /**
1181  *  init_tti - Initialization transmit traffic interrupt scheme
1182  *  @nic: device private variable
1183  *  @link: link status (UP/DOWN) used to enable/disable continuous
1184  *  transmit interrupts
1185  *  Description: The function configures transmit traffic interrupts
1186  *  Return Value:  SUCCESS on success and
1187  *  '-1' on failure
1188  */
1189
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	/* Program one TTI entry per Tx fifo, then commit each entry
	 * through the tti_command_mem strobe.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: scale the timer with the detected bus
			 * speed (MHz) so the interrupt rate stays constant.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization range thresholds A/B/C plus auto-cancel of the
		 * timer on utilization events.
		 */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only when requested
		 * and the link is up.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			/* MSI-X: higher utilization-frame-count thresholds */
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* INTA: UDP steering fifos get their own (larger)
			 * UFC thresholds; all other fifos use the defaults.
			 */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit entry i: write-enable + new-command strobe, then
		 * wait for the hardware to clear the strobe bit.
		 */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1256
1257 /**
1258  *  init_nic - Initialization of hardware
1259  *  @nic: device private variable
1260  *  Description: The function sequentially configures every block
1261  *  of the H/W from their reset values.
1262  *  Return Value:  SUCCESS on success and
1263  *  '-1' on failure (endian settings incorrect).
1264  */
1265
1266 static int init_nic(struct s2io_nic *nic)
1267 {
1268         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1269         struct net_device *dev = nic->dev;
1270         register u64 val64 = 0;
1271         void __iomem *add;
1272         u32 time;
1273         int i, j;
1274         struct mac_info *mac_control;
1275         struct config_param *config;
1276         int dtx_cnt = 0;
1277         unsigned long long mem_share;
1278         int mem_size;
1279
1280         mac_control = &nic->mac_control;
1281         config = &nic->config;
1282
1283         /* to set the swapper controle on the card */
1284         if(s2io_set_swapper(nic)) {
1285                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1286                 return -EIO;
1287         }
1288
1289         /*
1290          * Herc requires EOI to be removed from reset before XGXS, so..
1291          */
1292         if (nic->device_type & XFRAME_II_DEVICE) {
1293                 val64 = 0xA500000000ULL;
1294                 writeq(val64, &bar0->sw_reset);
1295                 msleep(500);
1296                 val64 = readq(&bar0->sw_reset);
1297         }
1298
1299         /* Remove XGXS from reset state */
1300         val64 = 0;
1301         writeq(val64, &bar0->sw_reset);
1302         msleep(500);
1303         val64 = readq(&bar0->sw_reset);
1304
1305         /* Ensure that it's safe to access registers by checking
1306          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1307          */
1308         if (nic->device_type == XFRAME_II_DEVICE) {
1309                 for (i = 0; i < 50; i++) {
1310                         val64 = readq(&bar0->adapter_status);
1311                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1312                                 break;
1313                         msleep(10);
1314                 }
1315                 if (i == 50)
1316                         return -ENODEV;
1317         }
1318
1319         /*  Enable Receiving broadcasts */
1320         add = &bar0->mac_cfg;
1321         val64 = readq(&bar0->mac_cfg);
1322         val64 |= MAC_RMAC_BCAST_ENABLE;
1323         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1324         writel((u32) val64, add);
1325         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1326         writel((u32) (val64 >> 32), (add + 4));
1327
1328         /* Read registers in all blocks */
1329         val64 = readq(&bar0->mac_int_mask);
1330         val64 = readq(&bar0->mc_int_mask);
1331         val64 = readq(&bar0->xgxs_int_mask);
1332
1333         /*  Set MTU */
1334         val64 = dev->mtu;
1335         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1336
1337         if (nic->device_type & XFRAME_II_DEVICE) {
1338                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1339                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1340                                           &bar0->dtx_control, UF);
1341                         if (dtx_cnt & 0x1)
1342                                 msleep(1); /* Necessary!! */
1343                         dtx_cnt++;
1344                 }
1345         } else {
1346                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1347                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1348                                           &bar0->dtx_control, UF);
1349                         val64 = readq(&bar0->dtx_control);
1350                         dtx_cnt++;
1351                 }
1352         }
1353
1354         /*  Tx DMA Initialization */
1355         val64 = 0;
1356         writeq(val64, &bar0->tx_fifo_partition_0);
1357         writeq(val64, &bar0->tx_fifo_partition_1);
1358         writeq(val64, &bar0->tx_fifo_partition_2);
1359         writeq(val64, &bar0->tx_fifo_partition_3);
1360
1361
1362         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1363                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1364
1365                 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1366                         vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1367
1368                 if (i == (config->tx_fifo_num - 1)) {
1369                         if (i % 2 == 0)
1370                                 i++;
1371                 }
1372
1373                 switch (i) {
1374                 case 1:
1375                         writeq(val64, &bar0->tx_fifo_partition_0);
1376                         val64 = 0;
1377                         j = 0;
1378                         break;
1379                 case 3:
1380                         writeq(val64, &bar0->tx_fifo_partition_1);
1381                         val64 = 0;
1382                         j = 0;
1383                         break;
1384                 case 5:
1385                         writeq(val64, &bar0->tx_fifo_partition_2);
1386                         val64 = 0;
1387                         j = 0;
1388                         break;
1389                 case 7:
1390                         writeq(val64, &bar0->tx_fifo_partition_3);
1391                         val64 = 0;
1392                         j = 0;
1393                         break;
1394                 default:
1395                         j++;
1396                         break;
1397                 }
1398         }
1399
1400         /*
1401          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1402          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1403          */
1404         if ((nic->device_type == XFRAME_I_DEVICE) &&
1405                 (nic->pdev->revision < 4))
1406                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1407
1408         val64 = readq(&bar0->tx_fifo_partition_0);
1409         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1410                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1411
1412         /*
1413          * Initialization of Tx_PA_CONFIG register to ignore packet
1414          * integrity checking.
1415          */
1416         val64 = readq(&bar0->tx_pa_cfg);
1417         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1418             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1419         writeq(val64, &bar0->tx_pa_cfg);
1420
1421         /* Rx DMA intialization. */
1422         val64 = 0;
1423         for (i = 0; i < config->rx_ring_num; i++) {
1424                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1425
1426                 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1427         }
1428         writeq(val64, &bar0->rx_queue_priority);
1429
1430         /*
1431          * Allocating equal share of memory to all the
1432          * configured Rings.
1433          */
1434         val64 = 0;
1435         if (nic->device_type & XFRAME_II_DEVICE)
1436                 mem_size = 32;
1437         else
1438                 mem_size = 64;
1439
1440         for (i = 0; i < config->rx_ring_num; i++) {
1441                 switch (i) {
1442                 case 0:
1443                         mem_share = (mem_size / config->rx_ring_num +
1444                                      mem_size % config->rx_ring_num);
1445                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1446                         continue;
1447                 case 1:
1448                         mem_share = (mem_size / config->rx_ring_num);
1449                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1450                         continue;
1451                 case 2:
1452                         mem_share = (mem_size / config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1454                         continue;
1455                 case 3:
1456                         mem_share = (mem_size / config->rx_ring_num);
1457                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1458                         continue;
1459                 case 4:
1460                         mem_share = (mem_size / config->rx_ring_num);
1461                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1462                         continue;
1463                 case 5:
1464                         mem_share = (mem_size / config->rx_ring_num);
1465                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1466                         continue;
1467                 case 6:
1468                         mem_share = (mem_size / config->rx_ring_num);
1469                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1470                         continue;
1471                 case 7:
1472                         mem_share = (mem_size / config->rx_ring_num);
1473                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1474                         continue;
1475                 }
1476         }
1477         writeq(val64, &bar0->rx_queue_cfg);
1478
1479         /*
1480          * Filling Tx round robin registers
1481          * as per the number of FIFOs for equal scheduling priority
1482          */
1483         switch (config->tx_fifo_num) {
1484         case 1:
1485                 val64 = 0x0;
1486                 writeq(val64, &bar0->tx_w_round_robin_0);
1487                 writeq(val64, &bar0->tx_w_round_robin_1);
1488                 writeq(val64, &bar0->tx_w_round_robin_2);
1489                 writeq(val64, &bar0->tx_w_round_robin_3);
1490                 writeq(val64, &bar0->tx_w_round_robin_4);
1491                 break;
1492         case 2:
1493                 val64 = 0x0001000100010001ULL;
1494                 writeq(val64, &bar0->tx_w_round_robin_0);
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 writeq(val64, &bar0->tx_w_round_robin_3);
1498                 val64 = 0x0001000100000000ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_4);
1500                 break;
1501         case 3:
1502                 val64 = 0x0001020001020001ULL;
1503                 writeq(val64, &bar0->tx_w_round_robin_0);
1504                 val64 = 0x0200010200010200ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_1);
1506                 val64 = 0x0102000102000102ULL;
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 val64 = 0x0001020001020001ULL;
1509                 writeq(val64, &bar0->tx_w_round_robin_3);
1510                 val64 = 0x0200010200000000ULL;
1511                 writeq(val64, &bar0->tx_w_round_robin_4);
1512                 break;
1513         case 4:
1514                 val64 = 0x0001020300010203ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_0);
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 writeq(val64, &bar0->tx_w_round_robin_2);
1518                 writeq(val64, &bar0->tx_w_round_robin_3);
1519                 val64 = 0x0001020300000000ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_4);
1521                 break;
1522         case 5:
1523                 val64 = 0x0001020304000102ULL;
1524                 writeq(val64, &bar0->tx_w_round_robin_0);
1525                 val64 = 0x0304000102030400ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_1);
1527                 val64 = 0x0102030400010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_2);
1529                 val64 = 0x0400010203040001ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_3);
1531                 val64 = 0x0203040000000000ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_4);
1533                 break;
1534         case 6:
1535                 val64 = 0x0001020304050001ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_0);
1537                 val64 = 0x0203040500010203ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_1);
1539                 val64 = 0x0405000102030405ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_2);
1541                 val64 = 0x0001020304050001ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_3);
1543                 val64 = 0x0203040500000000ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_4);
1545                 break;
1546         case 7:
1547                 val64 = 0x0001020304050600ULL;
1548                 writeq(val64, &bar0->tx_w_round_robin_0);
1549                 val64 = 0x0102030405060001ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_1);
1551                 val64 = 0x0203040506000102ULL;
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 val64 = 0x0304050600010203ULL;
1554                 writeq(val64, &bar0->tx_w_round_robin_3);
1555                 val64 = 0x0405060000000000ULL;
1556                 writeq(val64, &bar0->tx_w_round_robin_4);
1557                 break;
1558         case 8:
1559                 val64 = 0x0001020304050607ULL;
1560                 writeq(val64, &bar0->tx_w_round_robin_0);
1561                 writeq(val64, &bar0->tx_w_round_robin_1);
1562                 writeq(val64, &bar0->tx_w_round_robin_2);
1563                 writeq(val64, &bar0->tx_w_round_robin_3);
1564                 val64 = 0x0001020300000000ULL;
1565                 writeq(val64, &bar0->tx_w_round_robin_4);
1566                 break;
1567         }
1568
1569         /* Enable all configured Tx FIFO partitions */
1570         val64 = readq(&bar0->tx_fifo_partition_0);
1571         val64 |= (TX_FIFO_PARTITION_EN);
1572         writeq(val64, &bar0->tx_fifo_partition_0);
1573
1574         /* Filling the Rx round robin registers as per the
1575          * number of Rings and steering based on QoS with
1576          * equal priority.
1577          */
1578         switch (config->rx_ring_num) {
1579         case 1:
1580                 val64 = 0x0;
1581                 writeq(val64, &bar0->rx_w_round_robin_0);
1582                 writeq(val64, &bar0->rx_w_round_robin_1);
1583                 writeq(val64, &bar0->rx_w_round_robin_2);
1584                 writeq(val64, &bar0->rx_w_round_robin_3);
1585                 writeq(val64, &bar0->rx_w_round_robin_4);
1586
1587                 val64 = 0x8080808080808080ULL;
1588                 writeq(val64, &bar0->rts_qos_steering);
1589                 break;
1590         case 2:
1591                 val64 = 0x0001000100010001ULL;
1592                 writeq(val64, &bar0->rx_w_round_robin_0);
1593                 writeq(val64, &bar0->rx_w_round_robin_1);
1594                 writeq(val64, &bar0->rx_w_round_robin_2);
1595                 writeq(val64, &bar0->rx_w_round_robin_3);
1596                 val64 = 0x0001000100000000ULL;
1597                 writeq(val64, &bar0->rx_w_round_robin_4);
1598
1599                 val64 = 0x8080808040404040ULL;
1600                 writeq(val64, &bar0->rts_qos_steering);
1601                 break;
1602         case 3:
1603                 val64 = 0x0001020001020001ULL;
1604                 writeq(val64, &bar0->rx_w_round_robin_0);
1605                 val64 = 0x0200010200010200ULL;
1606                 writeq(val64, &bar0->rx_w_round_robin_1);
1607                 val64 = 0x0102000102000102ULL;
1608                 writeq(val64, &bar0->rx_w_round_robin_2);
1609                 val64 = 0x0001020001020001ULL;
1610                 writeq(val64, &bar0->rx_w_round_robin_3);
1611                 val64 = 0x0200010200000000ULL;
1612                 writeq(val64, &bar0->rx_w_round_robin_4);
1613
1614                 val64 = 0x8080804040402020ULL;
1615                 writeq(val64, &bar0->rts_qos_steering);
1616                 break;
1617         case 4:
1618                 val64 = 0x0001020300010203ULL;
1619                 writeq(val64, &bar0->rx_w_round_robin_0);
1620                 writeq(val64, &bar0->rx_w_round_robin_1);
1621                 writeq(val64, &bar0->rx_w_round_robin_2);
1622                 writeq(val64, &bar0->rx_w_round_robin_3);
1623                 val64 = 0x0001020300000000ULL;
1624                 writeq(val64, &bar0->rx_w_round_robin_4);
1625
1626                 val64 = 0x8080404020201010ULL;
1627                 writeq(val64, &bar0->rts_qos_steering);
1628                 break;
1629         case 5:
1630                 val64 = 0x0001020304000102ULL;
1631                 writeq(val64, &bar0->rx_w_round_robin_0);
1632                 val64 = 0x0304000102030400ULL;
1633                 writeq(val64, &bar0->rx_w_round_robin_1);
1634                 val64 = 0x0102030400010203ULL;
1635                 writeq(val64, &bar0->rx_w_round_robin_2);
1636                 val64 = 0x0400010203040001ULL;
1637                 writeq(val64, &bar0->rx_w_round_robin_3);
1638                 val64 = 0x0203040000000000ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_4);
1640
1641                 val64 = 0x8080404020201008ULL;
1642                 writeq(val64, &bar0->rts_qos_steering);
1643                 break;
1644         case 6:
1645                 val64 = 0x0001020304050001ULL;
1646                 writeq(val64, &bar0->rx_w_round_robin_0);
1647                 val64 = 0x0203040500010203ULL;
1648                 writeq(val64, &bar0->rx_w_round_robin_1);
1649                 val64 = 0x0405000102030405ULL;
1650                 writeq(val64, &bar0->rx_w_round_robin_2);
1651                 val64 = 0x0001020304050001ULL;
1652                 writeq(val64, &bar0->rx_w_round_robin_3);
1653                 val64 = 0x0203040500000000ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_4);
1655
1656                 val64 = 0x8080404020100804ULL;
1657                 writeq(val64, &bar0->rts_qos_steering);
1658                 break;
1659         case 7:
1660                 val64 = 0x0001020304050600ULL;
1661                 writeq(val64, &bar0->rx_w_round_robin_0);
1662                 val64 = 0x0102030405060001ULL;
1663                 writeq(val64, &bar0->rx_w_round_robin_1);
1664                 val64 = 0x0203040506000102ULL;
1665                 writeq(val64, &bar0->rx_w_round_robin_2);
1666                 val64 = 0x0304050600010203ULL;
1667                 writeq(val64, &bar0->rx_w_round_robin_3);
1668                 val64 = 0x0405060000000000ULL;
1669                 writeq(val64, &bar0->rx_w_round_robin_4);
1670
1671                 val64 = 0x8080402010080402ULL;
1672                 writeq(val64, &bar0->rts_qos_steering);
1673                 break;
1674         case 8:
1675                 val64 = 0x0001020304050607ULL;
1676                 writeq(val64, &bar0->rx_w_round_robin_0);
1677                 writeq(val64, &bar0->rx_w_round_robin_1);
1678                 writeq(val64, &bar0->rx_w_round_robin_2);
1679                 writeq(val64, &bar0->rx_w_round_robin_3);
1680                 val64 = 0x0001020300000000ULL;
1681                 writeq(val64, &bar0->rx_w_round_robin_4);
1682
1683                 val64 = 0x8040201008040201ULL;
1684                 writeq(val64, &bar0->rts_qos_steering);
1685                 break;
1686         }
1687
1688         /* UDP Fix */
1689         val64 = 0;
1690         for (i = 0; i < 8; i++)
1691                 writeq(val64, &bar0->rts_frm_len_n[i]);
1692
1693         /* Set the default rts frame length for the rings configured */
1694         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1695         for (i = 0 ; i < config->rx_ring_num ; i++)
1696                 writeq(val64, &bar0->rts_frm_len_n[i]);
1697
1698         /* Set the frame length for the configured rings
1699          * desired by the user
1700          */
1701         for (i = 0; i < config->rx_ring_num; i++) {
1702                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1703                  * specified frame length steering.
1704                  * If the user provides the frame length then program
1705                  * the rts_frm_len register for those values or else
1706                  * leave it as it is.
1707                  */
1708                 if (rts_frm_len[i] != 0) {
1709                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1710                                 &bar0->rts_frm_len_n[i]);
1711                 }
1712         }
1713
1714         /* Disable differentiated services steering logic */
1715         for (i = 0; i < 64; i++) {
1716                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1717                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1718                                 dev->name);
1719                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1720                         return -ENODEV;
1721                 }
1722         }
1723
1724         /* Program statistics memory */
1725         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1726
1727         if (nic->device_type == XFRAME_II_DEVICE) {
1728                 val64 = STAT_BC(0x320);
1729                 writeq(val64, &bar0->stat_byte_cnt);
1730         }
1731
1732         /*
1733          * Initializing the sampling rate for the device to calculate the
1734          * bandwidth utilization.
1735          */
1736         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1737             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1738         writeq(val64, &bar0->mac_link_util);
1739
1740         /*
1741          * Initializing the Transmit and Receive Traffic Interrupt
1742          * Scheme.
1743          */
1744
1745         /* Initialize TTI */
1746         if (SUCCESS != init_tti(nic, nic->last_link_state))
1747                 return -ENODEV;
1748
1749         /* RTI Initialization */
1750         if (nic->device_type == XFRAME_II_DEVICE) {
1751                 /*
1752                  * Programmed to generate Apprx 500 Intrs per
1753                  * second
1754                  */
1755                 int count = (nic->config.bus_speed * 125)/4;
1756                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1757         } else
1758                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1759         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1760                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1761                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1762
1763         writeq(val64, &bar0->rti_data1_mem);
1764
1765         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1766                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1767         if (nic->config.intr_type == MSI_X)
1768             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1769                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1770         else
1771             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1772                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1773         writeq(val64, &bar0->rti_data2_mem);
1774
1775         for (i = 0; i < config->rx_ring_num; i++) {
1776                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1777                                 | RTI_CMD_MEM_OFFSET(i);
1778                 writeq(val64, &bar0->rti_command_mem);
1779
1780                 /*
1781                  * Once the operation completes, the Strobe bit of the
1782                  * command register will be reset. We poll for this
1783                  * particular condition. We wait for a maximum of 500ms
1784                  * for the operation to complete, if it's not complete
1785                  * by then we return error.
1786                  */
1787                 time = 0;
1788                 while (true) {
1789                         val64 = readq(&bar0->rti_command_mem);
1790                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1791                                 break;
1792
1793                         if (time > 10) {
1794                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1795                                           dev->name);
1796                                 return -ENODEV;
1797                         }
1798                         time++;
1799                         msleep(50);
1800                 }
1801         }
1802
1803         /*
1804          * Initializing proper values as Pause threshold into all
1805          * the 8 Queues on Rx side.
1806          */
1807         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1808         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1809
1810         /* Disable RMAC PAD STRIPPING */
1811         add = &bar0->mac_cfg;
1812         val64 = readq(&bar0->mac_cfg);
1813         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1814         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1815         writel((u32) (val64), add);
1816         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1817         writel((u32) (val64 >> 32), (add + 4));
1818         val64 = readq(&bar0->mac_cfg);
1819
1820         /* Enable FCS stripping by adapter */
1821         add = &bar0->mac_cfg;
1822         val64 = readq(&bar0->mac_cfg);
1823         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1824         if (nic->device_type == XFRAME_II_DEVICE)
1825                 writeq(val64, &bar0->mac_cfg);
1826         else {
1827                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1828                 writel((u32) (val64), add);
1829                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1830                 writel((u32) (val64 >> 32), (add + 4));
1831         }
1832
1833         /*
1834          * Set the time value to be inserted in the pause frame
1835          * generated by xena.
1836          */
1837         val64 = readq(&bar0->rmac_pause_cfg);
1838         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1839         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1840         writeq(val64, &bar0->rmac_pause_cfg);
1841
1842         /*
1843          * Set the Threshold Limit for Generating the pause frame
1844          * If the amount of data in any Queue exceeds ratio of
1845          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1846          * pause frame is generated
1847          */
1848         val64 = 0;
1849         for (i = 0; i < 4; i++) {
1850                 val64 |=
1851                     (((u64) 0xFF00 | nic->mac_control.
1852                       mc_pause_threshold_q0q3)
1853                      << (i * 2 * 8));
1854         }
1855         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1856
1857         val64 = 0;
1858         for (i = 0; i < 4; i++) {
1859                 val64 |=
1860                     (((u64) 0xFF00 | nic->mac_control.
1861                       mc_pause_threshold_q4q7)
1862                      << (i * 2 * 8));
1863         }
1864         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1865
1866         /*
1867          * TxDMA will stop Read request if the number of read split has
1868          * exceeded the limit pointed by shared_splits
1869          */
1870         val64 = readq(&bar0->pic_control);
1871         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1872         writeq(val64, &bar0->pic_control);
1873
1874         if (nic->config.bus_speed == 266) {
1875                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1876                 writeq(0x0, &bar0->read_retry_delay);
1877                 writeq(0x0, &bar0->write_retry_delay);
1878         }
1879
1880         /*
1881          * Programming the Herc to split every write transaction
1882          * that does not start on an ADB to reduce disconnects.
1883          */
1884         if (nic->device_type == XFRAME_II_DEVICE) {
1885                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1886                         MISC_LINK_STABILITY_PRD(3);
1887                 writeq(val64, &bar0->misc_control);
1888                 val64 = readq(&bar0->pic_control2);
1889                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1890                 writeq(val64, &bar0->pic_control2);
1891         }
1892         if (strstr(nic->product_name, "CX4")) {
1893                 val64 = TMAC_AVG_IPG(0x17);
1894                 writeq(val64, &bar0->tmac_avg_ipg);
1895         }
1896
1897         return SUCCESS;
1898 }
1899 #define LINK_UP_DOWN_INTERRUPT          1
1900 #define MAC_RMAC_ERR_TIMER              2
1901
1902 static int s2io_link_fault_indication(struct s2io_nic *nic)
1903 {
1904         if (nic->device_type == XFRAME_II_DEVICE)
1905                 return LINK_UP_DOWN_INTERRUPT;
1906         else
1907                 return MAC_RMAC_ERR_TIMER;
1908 }
1909
1910 /**
1911  *  do_s2io_write_bits -  update alarm bits in alarm register
1912  *  @value: alarm bits
1913  *  @flag: interrupt status
1914  *  @addr: address value
1915  *  Description: update alarm bits in alarm register
1916  *  Return Value:
1917  *  NONE.
1918  */
1919 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1920 {
1921         u64 temp64;
1922
1923         temp64 = readq(addr);
1924
1925         if(flag == ENABLE_INTRS)
1926                 temp64 &= ~((u64) value);
1927         else
1928                 temp64 |= ((u64) value);
1929         writeq(temp64, addr);
1930 }
1931
/*
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private structure
 * @mask: bitmask selecting which alarm blocks (TX/RX DMA, MAC, XGXS, MC)
 *        to touch
 * @flag: ENABLE_INTRS or DISABLE_INTRS, passed through to
 *        do_s2io_write_bits()
 *
 * For each block selected in @mask, programs the per-block error mask
 * registers and accumulates the block's top-level bit into
 * nic->general_int_mask.  All general interrupts are masked up front
 * while the per-block masks are updated.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while reprogramming sub-masks */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR;
		/* Only adapters polled via the RMAC error timer report link
		 * state changes through this alarm register. */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2058 /**
2059  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2060  *  @nic: device private variable,
2061  *  @mask: A mask indicating which Intr block must be modified and,
2062  *  @flag: A flag indicating whether to enable or disable the Intrs.
2063  *  Description: This function will either disable or enable the interrupts
2064  *  depending on the flag argument. The mask argument can be used to
2065  *  enable/disable any Intr block.
2066  *  Return Value: NONE.
2067  */
2068
2069 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2070 {
2071         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2072         register u64 temp64 = 0, intr_mask = 0;
2073
2074         intr_mask = nic->general_int_mask;
2075
2076         /*  Top level interrupt classification */
2077         /*  PIC Interrupts */
2078         if (mask & TX_PIC_INTR) {
2079                 /*  Enable PIC Intrs in the general intr mask register */
2080                 intr_mask |= TXPIC_INT_M;
2081                 if (flag == ENABLE_INTRS) {
2082                         /*
2083                          * If Hercules adapter enable GPIO otherwise
2084                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2085                          * interrupts for now.
2086                          * TODO
2087                          */
2088                         if (s2io_link_fault_indication(nic) ==
2089                                         LINK_UP_DOWN_INTERRUPT ) {
2090                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2091                                                 &bar0->pic_int_mask);
2092                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2093                                                 &bar0->gpio_int_mask);
2094                         } else
2095                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2096                 } else if (flag == DISABLE_INTRS) {
2097                         /*
2098                          * Disable PIC Intrs in the general
2099                          * intr mask register
2100                          */
2101                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2102                 }
2103         }
2104
2105         /*  Tx traffic interrupts */
2106         if (mask & TX_TRAFFIC_INTR) {
2107                 intr_mask |= TXTRAFFIC_INT_M;
2108                 if (flag == ENABLE_INTRS) {
2109                         /*
2110                          * Enable all the Tx side interrupts
2111                          * writing 0 Enables all 64 TX interrupt levels
2112                          */
2113                         writeq(0x0, &bar0->tx_traffic_mask);
2114                 } else if (flag == DISABLE_INTRS) {
2115                         /*
2116                          * Disable Tx Traffic Intrs in the general intr mask
2117                          * register.
2118                          */
2119                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2120                 }
2121         }
2122
2123         /*  Rx traffic interrupts */
2124         if (mask & RX_TRAFFIC_INTR) {
2125                 intr_mask |= RXTRAFFIC_INT_M;
2126                 if (flag == ENABLE_INTRS) {
2127                         /* writing 0 Enables all 8 RX interrupt levels */
2128                         writeq(0x0, &bar0->rx_traffic_mask);
2129                 } else if (flag == DISABLE_INTRS) {
2130                         /*
2131                          * Disable Rx Traffic Intrs in the general intr mask
2132                          * register.
2133                          */
2134                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2135                 }
2136         }
2137
2138         temp64 = readq(&bar0->general_int_mask);
2139         if (flag == ENABLE_INTRS)
2140                 temp64 &= ~((u64) intr_mask);
2141         else
2142                 temp64 = DISABLE_ALL_INTRS;
2143         writeq(temp64, &bar0->general_int_mask);
2144
2145         nic->general_int_mask = readq(&bar0->general_int_mask);
2146 }
2147
2148 /**
2149  *  verify_pcc_quiescent- Checks for PCC quiescent state
2150  *  Return: 1 If PCC is quiescence
2151  *          0 If PCC is not quiescence
2152  */
2153 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2154 {
2155         int ret = 0, herc;
2156         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2157         u64 val64 = readq(&bar0->adapter_status);
2158
2159         herc = (sp->device_type == XFRAME_II_DEVICE);
2160
2161         if (flag == false) {
2162                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2163                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2164                                 ret = 1;
2165                 } else {
2166                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2167                                 ret = 1;
2168                 }
2169         } else {
2170                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2171                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2172                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2173                                 ret = 1;
2174                 } else {
2175                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2176                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2177                                 ret = 1;
2178                 }
2179         }
2180
2181         return ret;
2182 }
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  Description: Returns whether the H/W is ready to go or not. Depending
 *  on whether adapter enable bit was written or not the comparison
 *  differs and the calling function passes the input argument flag to
 *  indicate this.
 *  Return: 1 If Xena is quiescent
 *          0 If Xena is not quiescent
 */
2192
2193 static int verify_xena_quiescence(struct s2io_nic *sp)
2194 {
2195         int  mode;
2196         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2197         u64 val64 = readq(&bar0->adapter_status);
2198         mode = s2io_verify_pci_mode(sp);
2199
2200         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2201                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2202                 return 0;
2203         }
2204         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2205         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2206                 return 0;
2207         }
2208         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2209                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2210                 return 0;
2211         }
2212         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2213                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2214                 return 0;
2215         }
2216         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2217                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2218                 return 0;
2219         }
2220         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2221                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2222                 return 0;
2223         }
2224         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2225                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2226                 return 0;
2227         }
2228         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2229                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2230                 return 0;
2231         }
2232
2233         /*
2234          * In PCI 33 mode, the P_PLL is not used, and therefore,
2235          * the the P_PLL_LOCK bit in the adapter_status register will
2236          * not be asserted.
2237          */
2238         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2239                 sp->device_type == XFRAME_II_DEVICE && mode !=
2240                 PCI_MODE_PCI_33) {
2241                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2242                 return 0;
2243         }
2244         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2245                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2246                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2247                 return 0;
2248         }
2249         return 1;
2250 }
2251
2252 /**
2253  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2254  * @sp: Pointer to device specifc structure
2255  * Description :
2256  * New procedure to clear mac address reading  problems on Alpha platforms
2257  *
2258  */
2259
2260 static void fix_mac_address(struct s2io_nic * sp)
2261 {
2262         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2263         u64 val64;
2264         int i = 0;
2265
2266         while (fix_mac[i] != END_SIGN) {
2267                 writeq(fix_mac[i++], &bar0->gpio_control);
2268                 udelay(10);
2269                 val64 = readq(&bar0->gpio_control);
2270         }
2271 }
2272
2273 /**
2274  *  start_nic - Turns the device on
2275  *  @nic : device private variable.
2276  *  Description:
2277  *  This function actually turns the device on. Before this  function is
2278  *  called,all Registers are configured from their reset states
2279  *  and shared memory is allocated but the NIC is still quiescent. On
2280  *  calling this function, the device interrupts are cleared and the NIC is
2281  *  literally switched on by writing into the adapter control register.
2282  *  Return Value:
2283  *  SUCCESS on success and -1 on failure.
2284  */
2285
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first RxD block of this ring. */
		writeq((u64) ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the old RxD backoff interval, then set 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter requested no VLAN tag stripping. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* NOTE(review): 0x2700 is a raw BAR0 offset with no named
		 * field in struct XENA_dev_config — confirm against the
		 * hardware spec before touching this.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2393 /**
2394  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2395  */
2396 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2397                                         TxD *txdlp, int get_off)
2398 {
2399         struct s2io_nic *nic = fifo_data->nic;
2400         struct sk_buff *skb;
2401         struct TxD *txds;
2402         u16 j, frg_cnt;
2403
2404         txds = txdlp;
2405         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2406                 pci_unmap_single(nic->pdev, (dma_addr_t)
2407                         txds->Buffer_Pointer, sizeof(u64),
2408                         PCI_DMA_TODEVICE);
2409                 txds++;
2410         }
2411
2412         skb = (struct sk_buff *) ((unsigned long)
2413                         txds->Host_Control);
2414         if (!skb) {
2415                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2416                 return NULL;
2417         }
2418         pci_unmap_single(nic->pdev, (dma_addr_t)
2419                          txds->Buffer_Pointer,
2420                          skb->len - skb->data_len,
2421                          PCI_DMA_TODEVICE);
2422         frg_cnt = skb_shinfo(skb)->nr_frags;
2423         if (frg_cnt) {
2424                 txds++;
2425                 for (j = 0; j < frg_cnt; j++, txds++) {
2426                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2427                         if (!txds->Buffer_Pointer)
2428                                 break;
2429                         pci_unmap_page(nic->pdev, (dma_addr_t)
2430                                         txds->Buffer_Pointer,
2431                                        frag->size, PCI_DMA_TODEVICE);
2432                 }
2433         }
2434         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2435         return(skb);
2436 }
2437
2438 /**
2439  *  free_tx_buffers - Free all queued Tx buffers
2440  *  @nic : device private variable.
2441  *  Description:
2442  *  Free all queued Tx buffers.
2443  *  Return Value: void
2444 */
2445
2446 static void free_tx_buffers(struct s2io_nic *nic)
2447 {
2448         struct net_device *dev = nic->dev;
2449         struct sk_buff *skb;
2450         struct TxD *txdp;
2451         int i, j;
2452         struct mac_info *mac_control;
2453         struct config_param *config;
2454         int cnt = 0;
2455
2456         mac_control = &nic->mac_control;
2457         config = &nic->config;
2458
2459         for (i = 0; i < config->tx_fifo_num; i++) {
2460                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2461                 struct fifo_info *fifo = &mac_control->fifos[i];
2462                 unsigned long flags;
2463
2464                 spin_lock_irqsave(&fifo->tx_lock, flags);
2465                 for (j = 0; j < tx_cfg->fifo_len; j++) {
2466                         txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2467                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2468                         if (skb) {
2469                                 nic->mac_control.stats_info->sw_stat.mem_freed
2470                                         += skb->truesize;
2471                                 dev_kfree_skb(skb);
2472                                 cnt++;
2473                         }
2474                 }
2475                 DBG_PRINT(INTR_DBG,
2476                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2477                           dev->name, cnt, i);
2478                 fifo->tx_curr_get_info.offset = 0;
2479                 fifo->tx_curr_put_info.offset = 0;
2480                 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2481         }
2482 }
2483
/**
 *  stop_nic - To stop the nic
 *  @nic : device private variable.
 *  Description:
 *  This function does exactly the opposite of what the start_nic()
 *  function does. This function is called to stop the device.
 *  Return Value:
 *  void.
 */
2493
2494 static void stop_nic(struct s2io_nic *nic)
2495 {
2496         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2497         register u64 val64 = 0;
2498         u16 interruptible;
2499         struct mac_info *mac_control;
2500         struct config_param *config;
2501
2502         mac_control = &nic->mac_control;
2503         config = &nic->config;
2504
2505         /*  Disable all interrupts */
2506         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2507         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2508         interruptible |= TX_PIC_INTR;
2509         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2510
2511         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2512         val64 = readq(&bar0->adapter_control);
2513         val64 &= ~(ADAPTER_CNTL_EN);
2514         writeq(val64, &bar0->adapter_control);
2515 }
2516
2517 /**
2518  *  fill_rx_buffers - Allocates the Rx side skbs
2519  *  @ring_info: per ring structure
2520  *  @from_card_up: If this is true, we will map the buffer to get
2521  *     the dma address for buf0 and buf1 to give it to the card.
2522  *     Else we will sync the already mapped buffer to give it to the card.
2523  *  Description:
2524  *  The function allocates Rx side skbs and puts the physical
2525  *  address of these buffers into the RxD buffer pointers, so that the NIC
2526  *  can DMA the received frame into these locations.
2527  *  The NIC supports 3 receive modes, viz
2528  *  1. single buffer,
2529  *  2. three buffer and
2530  *  3. Five buffer modes.
2531  *  Each mode defines how many fragments the received frame will be split
2532  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2533  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2534  *  is split into 3 fragments. As of now only single buffer mode is
2535  *  supported.
2536  *   Return Value:
2537  *  SUCCESS on success or an appropriate -ve value on failure.
2538  */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
				int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of buffers the ring is currently short of. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never read in
		 * this function — looks like leftover debug state; confirm.
		 */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer has caught up with the get pointer on a
		 * descriptor still holding an skb: ring is full, stop.
		 */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Offset rxd_count is past the usable descriptors of this
		 * block: wrap to the start of the next block.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor is still owned by the adapter (RXD_OWN_XENA
		 * set and marker bit s2BIT(0) present in 3B mode): stop.
		 */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing out, so earlier work is not lost.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			/* NOTE(review): mapping uses ring->pdev but the
			 * error check uses nic->pdev — presumably the same
			 * device; confirm.
			 */
			if (pci_dma_mapping_error(nic->pdev,
						rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE
			 * boundary.
			 */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 holds ba->ba_0; map it on first use, only
			 * sync it on refills.
			 */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

					/* Buffer1 mapping failed: undo the
					 * Buffer2 mapping made just above.
					 */
					if (pci_dma_mapping_error(nic->pdev,
						rxdp3->Buffer1_ptr)) {
						pci_unmap_single
							(ring->pdev,
						    (dma_addr_t)(unsigned long)
							skb->data,
							ring->mtu + 4,
							PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Ownership is handed over in batches of
		 * 2^rxsync_frequency descriptors; descriptors inside a
		 * batch get the OWN bit immediately, the batch head waits
		 * for the wmb() below.
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	/* Undo the accounting for the skb that could not be mapped. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2748
2749 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2750 {
2751         struct net_device *dev = sp->dev;
2752         int j;
2753         struct sk_buff *skb;
2754         struct RxD_t *rxdp;
2755         struct mac_info *mac_control;
2756         struct buffAdd *ba;
2757         struct RxD1 *rxdp1;
2758         struct RxD3 *rxdp3;
2759
2760         mac_control = &sp->mac_control;
2761         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2762                 rxdp = mac_control->rings[ring_no].
2763                                 rx_blocks[blk].rxds[j].virt_addr;
2764                 skb = (struct sk_buff *)
2765                         ((unsigned long) rxdp->Host_Control);
2766                 if (!skb) {
2767                         continue;
2768                 }
2769                 if (sp->rxd_mode == RXD_MODE_1) {
2770                         rxdp1 = (struct RxD1*)rxdp;
2771                         pci_unmap_single(sp->pdev, (dma_addr_t)
2772                                 rxdp1->Buffer0_ptr,
2773                                 dev->mtu +
2774                                 HEADER_ETHERNET_II_802_3_SIZE
2775                                 + HEADER_802_2_SIZE +
2776                                 HEADER_SNAP_SIZE,
2777                                 PCI_DMA_FROMDEVICE);
2778                         memset(rxdp, 0, sizeof(struct RxD1));
2779                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2780                         rxdp3 = (struct RxD3*)rxdp;
2781                         ba = &mac_control->rings[ring_no].
2782                                 ba[blk][j];
2783                         pci_unmap_single(sp->pdev, (dma_addr_t)
2784                                 rxdp3->Buffer0_ptr,
2785                                 BUF0_LEN,
2786                                 PCI_DMA_FROMDEVICE);
2787                         pci_unmap_single(sp->pdev, (dma_addr_t)
2788                                 rxdp3->Buffer1_ptr,
2789                                 BUF1_LEN,
2790                                 PCI_DMA_FROMDEVICE);
2791                         pci_unmap_single(sp->pdev, (dma_addr_t)
2792                                 rxdp3->Buffer2_ptr,
2793                                 dev->mtu + 4,
2794                                 PCI_DMA_FROMDEVICE);
2795                         memset(rxdp, 0, sizeof(struct RxD3));
2796                 }
2797                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2798                 dev_kfree_skb(skb);
2799                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2800         }
2801 }
2802
2803 /**
2804  *  free_rx_buffers - Frees all Rx buffers
2805  *  @sp: device private variable.
2806  *  Description:
2807  *  This function will free all Rx buffers allocated by host.
2808  *  Return Value:
2809  *  NONE.
2810  */
2811
2812 static void free_rx_buffers(struct s2io_nic *sp)
2813 {
2814         struct net_device *dev = sp->dev;
2815         int i, blk = 0, buf_cnt = 0;
2816         struct mac_info *mac_control;
2817         struct config_param *config;
2818
2819         mac_control = &sp->mac_control;
2820         config = &sp->config;
2821
2822         for (i = 0; i < config->rx_ring_num; i++) {
2823                 struct ring_info *ring = &mac_control->rings[i];
2824
2825                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2826                         free_rxd_blk(sp,i,blk);
2827
2828                 ring->rx_curr_put_info.block_index = 0;
2829                 ring->rx_curr_get_info.block_index = 0;
2830                 ring->rx_curr_put_info.offset = 0;
2831                 ring->rx_curr_get_info.offset = 0;
2832                 ring->rx_bufs_left = 0;
2833                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2834                           dev->name, buf_cnt, i);
2835         }
2836 }
2837
2838 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2839 {
2840         if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2841                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2842                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2843         }
2844         return 0;
2845 }
2846
2847 /**
2848  * s2io_poll - Rx interrupt handler for NAPI support
2849  * @napi : pointer to the napi structure.
2850  * @budget : The number of packets that were budgeted to be processed
2851  * during  one pass through the 'Poll" function.
2852  * Description:
2853  * Comes into picture only if NAPI support has been incorporated. It does
2854  * the same thing that rx_intr_handler does, but not in a interrupt context
2855  * also It will process only a given number of packets.
2856  * Return value:
2857  * 0 on success and 1 if there are No Rx packets to be processed.
2858  */
2859
2860 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2861 {
2862         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2863         struct net_device *dev = ring->dev;
2864         struct config_param *config;
2865         struct mac_info *mac_control;
2866         int pkts_processed = 0;
2867         u8 __iomem *addr = NULL;
2868         u8 val8 = 0;
2869         struct s2io_nic *nic = netdev_priv(dev);
2870         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2871         int budget_org = budget;
2872
2873         config = &nic->config;
2874         mac_control = &nic->mac_control;
2875
2876         if (unlikely(!is_s2io_card_up(nic)))
2877                 return 0;
2878
2879         pkts_processed = rx_intr_handler(ring, budget);
2880         s2io_chk_rx_buffers(nic, ring);
2881
2882         if (pkts_processed < budget_org) {
2883                 napi_complete(napi);
2884                 /*Re Enable MSI-Rx Vector*/
2885                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2886                 addr += 7 - ring->ring_no;
2887                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2888                 writeb(val8, addr);
2889                 val8 = readb(addr);
2890         }
2891         return pkts_processed;
2892 }
2893 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2894 {
2895         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2896         struct config_param *config;
2897         struct mac_info *mac_control;
2898         int pkts_processed = 0;
2899         int ring_pkts_processed, i;
2900         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2901         int budget_org = budget;
2902
2903         config = &nic->config;
2904         mac_control = &nic->mac_control;
2905
2906         if (unlikely(!is_s2io_card_up(nic)))
2907                 return 0;
2908
2909         for (i = 0; i < config->rx_ring_num; i++) {
2910                 struct ring_info *ring = &mac_control->rings[i];
2911                 ring_pkts_processed = rx_intr_handler(ring, budget);
2912                 s2io_chk_rx_buffers(nic, ring);
2913                 pkts_processed += ring_pkts_processed;
2914                 budget -= ring_pkts_processed;
2915                 if (budget <= 0)
2916                         break;
2917         }
2918         if (pkts_processed < budget_org) {
2919                 napi_complete(napi);
2920                 /* Re enable the Rx interrupts for the ring */
2921                 writeq(0, &bar0->rx_traffic_mask);
2922                 readl(&bar0->rx_traffic_mask);
2923         }
2924         return pkts_processed;
2925 }
2926
2927 #ifdef CONFIG_NET_POLL_CONTROLLER
2928 /**
2929  * s2io_netpoll - netpoll event handler entry point
2930  * @dev : pointer to the device structure.
2931  * Description:
2932  *      This function will be called by upper layer to check for events on the
2933  * interface in situations where interrupts are disabled. It is used for
2934  * specific in-kernel networking tasks, such as remote consoles and kernel
2935  * debugging over the network (example netdump in RedHat).
2936  */
2937 static void s2io_netpoll(struct net_device *dev)
2938 {
2939         struct s2io_nic *nic = netdev_priv(dev);
2940         struct mac_info *mac_control;
2941         struct config_param *config;
2942         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2943         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2944         int i;
2945
2946         if (pci_channel_offline(nic->pdev))
2947                 return;
2948
2949         disable_irq(dev->irq);
2950
2951         mac_control = &nic->mac_control;
2952         config = &nic->config;
2953
2954         writeq(val64, &bar0->rx_traffic_int);
2955         writeq(val64, &bar0->tx_traffic_int);
2956
2957         /* we need to free up the transmitted skbufs or else netpoll will
2958          * run out of skbs and will fail and eventually netpoll application such
2959          * as netdump will fail.
2960          */
2961         for (i = 0; i < config->tx_fifo_num; i++)
2962                 tx_intr_handler(&mac_control->fifos[i]);
2963
2964         /* check for received packet and indicate up to network */
2965         for (i = 0; i < config->rx_ring_num; i++) {
2966                 struct ring_info *ring = &mac_control->rings[i];
2967
2968                 rx_intr_handler(ring, 0);
2969         }
2970
2971         for (i = 0; i < config->rx_ring_num; i++) {
2972                 struct ring_info *ring = &mac_control->rings[i];
2973
2974                 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2975                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2976                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2977                         break;
2978                 }
2979         }
2980         enable_irq(dev->irq);
2981         return;
2982 }
2983 #endif
2984
2985 /**
2986  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
2988  *  @budget: budget for napi processing.
2989  *  Description:
2990  *  If the interrupt is because of a received frame or if the
2991  *  receive ring contains fresh as yet un-processed frames,this function is
2992  *  called. It picks out the RxD at which place the last Rx processing had
2993  *  stopped and sends the skb to the OSM's Rx handler and then increments
2994  *  the offset.
2995  *  Return Value:
2996  *  No. of napi packets processed.
2997  */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the consumer (get) position and a stable copy of the
	 * producer (put) position before walking the descriptors. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If the get index is right behind the put index, the
		 * ring is full - stop processing.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Unmap (or sync) the DMA buffers for this descriptor before
		 * the CPU touches the packet data; layout depends on RxD mode. */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping to the next descriptor
		 * block (and back to block 0) as needed. */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, only napi_pkts counts against the budget */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
3091
3092 /**
3093  *  tx_intr_handler - Transmit interrupt handler
3094  *  @nic : device private variable
3095  *  Description:
3096  *  If an interrupt was raised to indicate DMA complete of the
3097  *  Tx packet, this function is called. It identifies the last TxD
3098  *  whose buffer was freed and frees all skbs whose data have already
3099  *  DMA'ed into the NICs internal memory.
3100  *  Return Value:
3101  *  NONE
3102  */
3103
3104 static void tx_intr_handler(struct fifo_info *fifo_data)
3105 {
3106         struct s2io_nic *nic = fifo_data->nic;
3107         struct tx_curr_get_info get_info, put_info;
3108         struct sk_buff *skb = NULL;
3109         struct TxD *txdlp;
3110         int pkt_cnt = 0;
3111         unsigned long flags = 0;
3112         u8 err_mask;
3113
3114         if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3115                         return;
3116
3117         get_info = fifo_data->tx_curr_get_info;
3118         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3119         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
3120             list_virt_addr;
3121         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3122                (get_info.offset != put_info.offset) &&
3123                (txdlp->Host_Control)) {
3124                 /* Check for TxD errors */
3125                 if (txdlp->Control_1 & TXD_T_CODE) {
3126                         unsigned long long err;
3127                         err = txdlp->Control_1 & TXD_T_CODE;
3128                         if (err & 0x1) {
3129                                 nic->mac_control.stats_info->sw_stat.
3130                                                 parity_err_cnt++;
3131                         }
3132
3133                         /* update t_code statistics */
3134                         err_mask = err >> 48;
3135                         switch(err_mask) {
3136                                 case 2:
3137                                         nic->mac_control.stats_info->sw_stat.
3138                                                         tx_buf_abort_cnt++;
3139                                 break;
3140
3141                                 case 3:
3142                                         nic->mac_control.stats_info->sw_stat.
3143                                                         tx_desc_abort_cnt++;
3144                                 break;
3145
3146                                 case 7:
3147                                         nic->mac_control.stats_info->sw_stat.
3148                                                         tx_parity_err_cnt++;
3149                                 break;
3150
3151                                 case 10:
3152                                         nic->mac_control.stats_info->sw_stat.
3153                                                         tx_link_loss_cnt++;
3154                                 break;
3155
3156                                 case 15:
3157                                         nic->mac_control.stats_info->sw_stat.
3158                                                         tx_list_proc_err_cnt++;
3159                                 break;
3160                         }
3161                 }
3162
3163                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3164                 if (skb == NULL) {
3165                         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3166                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
3167                         __func__);
3168                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3169                         return;
3170                 }
3171                 pkt_cnt++;
3172
3173                 /* Updating the statistics block */
3174                 nic->dev->stats.tx_bytes += skb->len;
3175                 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3176                 dev_kfree_skb_irq(skb);
3177
3178                 get_info.offset++;
3179                 if (get_info.offset == get_info.fifo_len + 1)
3180                         get_info.offset = 0;
3181                 txdlp = (struct TxD *) fifo_data->list_info
3182                     [get_info.offset].list_virt_addr;
3183                 fifo_data->tx_curr_get_info.offset =
3184                     get_info.offset;
3185         }
3186
3187         s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3188
3189         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3190 }
3191
3192 /**
3193  *  s2io_mdio_write - Function to write in to MDIO registers
3194  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3195  *  @addr     : address value
3196  *  @value    : data value
3197  *  @dev      : pointer to net_device structure
3198  *  Description:
3199  *  This function is used to write values to the MDIO registers
3200  *  NONE
3201  */
3202 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3203 {
3204         u64 val64 = 0x0;
3205         struct s2io_nic *sp = netdev_priv(dev);
3206         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3207
3208         //address transaction
3209         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3210                         | MDIO_MMD_DEV_ADDR(mmd_type)
3211                         | MDIO_MMS_PRT_ADDR(0x0);
3212         writeq(val64, &bar0->mdio_control);
3213         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3214         writeq(val64, &bar0->mdio_control);
3215         udelay(100);
3216
3217         //Data transaction
3218         val64 = 0x0;
3219         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3220                         | MDIO_MMD_DEV_ADDR(mmd_type)
3221                         | MDIO_MMS_PRT_ADDR(0x0)
3222                         | MDIO_MDIO_DATA(value)
3223                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3224         writeq(val64, &bar0->mdio_control);
3225         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3226         writeq(val64, &bar0->mdio_control);
3227         udelay(100);
3228
3229         val64 = 0x0;
3230         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3231         | MDIO_MMD_DEV_ADDR(mmd_type)
3232         | MDIO_MMS_PRT_ADDR(0x0)
3233         | MDIO_OP(MDIO_OP_READ_TRANS);
3234         writeq(val64, &bar0->mdio_control);
3235         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3236         writeq(val64, &bar0->mdio_control);
3237         udelay(100);
3238
3239 }
3240
3241 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
3243  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3244  *  @addr     : address value
3245  *  @dev      : pointer to net_device structure
3246  *  Description:
 *  This function is used to read values from the MDIO registers
3248  *  NONE
3249  */
3250 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3251 {
3252         u64 val64 = 0x0;
3253         u64 rval64 = 0x0;
3254         struct s2io_nic *sp = netdev_priv(dev);
3255         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3256
3257         /* address transaction */
3258         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3259                         | MDIO_MMD_DEV_ADDR(mmd_type)
3260                         | MDIO_MMS_PRT_ADDR(0x0);
3261         writeq(val64, &bar0->mdio_control);
3262         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3263         writeq(val64, &bar0->mdio_control);
3264         udelay(100);
3265
3266         /* Data transaction */
3267         val64 = 0x0;
3268         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3269                         | MDIO_MMD_DEV_ADDR(mmd_type)
3270                         | MDIO_MMS_PRT_ADDR(0x0)
3271                         | MDIO_OP(MDIO_OP_READ_TRANS);
3272         writeq(val64, &bar0->mdio_control);
3273         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3274         writeq(val64, &bar0->mdio_control);
3275         udelay(100);
3276
3277         /* Read the value from regs */
3278         rval64 = readq(&bar0->mdio_control);
3279         rval64 = rval64 & 0xFFFF0000;
3280         rval64 = rval64 >> 16;
3281         return rval64;
3282 }
3283 /**
3284  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : register holding the packed per-alarm 2-bit counters
 *  @index        : index of the 2-bit field within regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
3288  *  Description:
3289  *  This function is to check the status of the xpak counters value
3290  *  NONE
3291  */
3292
3293 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3294 {
3295         u64 mask = 0x3;
3296         u64 val64;
3297         int i;
3298         for(i = 0; i <index; i++)
3299                 mask = mask << 0x2;
3300
3301         if(flag > 0)
3302         {
3303                 *counter = *counter + 1;
3304                 val64 = *regs_stat & mask;
3305                 val64 = val64 >> (index * 0x2);
3306                 val64 = val64 + 1;
3307                 if(val64 == 3)
3308                 {
3309                         switch(type)
3310                         {
3311                         case 1:
3312                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3313                                           "service. Excessive temperatures may "
3314                                           "result in premature transceiver "
3315                                           "failure \n");
3316                         break;
3317                         case 2:
3318                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3319                                           "service Excessive bias currents may "
3320                                           "indicate imminent laser diode "
3321                                           "failure \n");
3322                         break;
3323                         case 3:
3324                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3325                                           "service Excessive laser output "
3326                                           "power may saturate far-end "
3327                                           "receiver\n");
3328                         break;
3329                         default:
3330                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3331                                           "type \n");
3332                         }
3333                         val64 = 0x0;
3334                 }
3335                 val64 = val64 << (index * 0x2);
3336                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3337
3338         } else {
3339                 *regs_stat = *regs_stat & (~mask);
3340         }
3341 }
3342
3343 /**
3344  *  s2io_updt_xpak_counter - Function to update the xpak counters
3345  *  @dev         : pointer to net_device struct
3346  *  Description:
 *  This function is to update the status of the xpak counters value
3348  *  NONE
3349  */
3350 static void s2io_updt_xpak_counter(struct net_device *dev)
3351 {
3352         u16 flag  = 0x0;
3353         u16 type  = 0x0;
3354         u16 val16 = 0x0;
3355         u64 val64 = 0x0;
3356         u64 addr  = 0x0;
3357
3358         struct s2io_nic *sp = netdev_priv(dev);
3359         struct stat_block *stat_info = sp->mac_control.stats_info;
3360
3361         /* Check the communication with the MDIO slave */
3362         addr = MDIO_CTRL1;
3363         val64 = 0x0;
3364         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3365         if((val64 == 0xFFFF) || (val64 == 0x0000))
3366         {
3367                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3368                           "Returned %llx\n", (unsigned long long)val64);
3369                 return;
3370         }
3371
3372         /* Check for the expected value of control reg 1 */
3373         if(val64 != MDIO_CTRL1_SPEED10G)
3374         {
3375                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3376                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n",
3377                           (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3378                 return;
3379         }
3380
3381         /* Loading the DOM register to MDIO register */
3382         addr = 0xA100;
3383         s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3384         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3385
3386         /* Reading the Alarm flags */
3387         addr = 0xA070;
3388         val64 = 0x0;
3389         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3390
3391         flag = CHECKBIT(val64, 0x7);
3392         type = 1;
3393         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3394                                 &stat_info->xpak_stat.xpak_regs_stat,
3395                                 0x0, flag, type);
3396
3397         if(CHECKBIT(val64, 0x6))
3398                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3399
3400         flag = CHECKBIT(val64, 0x3);
3401         type = 2;
3402         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3403                                 &stat_info->xpak_stat.xpak_regs_stat,
3404                                 0x2, flag, type);
3405
3406         if(CHECKBIT(val64, 0x2))
3407                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3408
3409         flag = CHECKBIT(val64, 0x1);
3410         type = 3;
3411         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3412                                 &stat_info->xpak_stat.xpak_regs_stat,
3413                                 0x4, flag, type);
3414
3415         if(CHECKBIT(val64, 0x0))
3416                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3417
3418         /* Reading the Warning flags */
3419         addr = 0xA074;
3420         val64 = 0x0;
3421         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3422
3423         if(CHECKBIT(val64, 0x7))
3424                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3425
3426         if(CHECKBIT(val64, 0x6))
3427                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3428
3429         if(CHECKBIT(val64, 0x3))
3430                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3431
3432         if(CHECKBIT(val64, 0x2))
3433                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3434
3435         if(CHECKBIT(val64, 0x1))
3436                 stat_info->xpak_stat.warn_laser_output_power_high++;
3437
3438         if(CHECKBIT(val64, 0x0))
3439                 stat_info->xpak_stat.warn_laser_output_power_low++;
3440 }
3441
3442 /**
3443  *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask identifying the busy bit within that register.
 *  @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET - the state to wait for.
3446  *  Description: Function that waits for a command to Write into RMAC
3447  *  ADDR DATA registers to be completed and returns either success or
3448  *  error depending on whether the command was complete or not.
3449  *  Return value:
3450  *   SUCCESS on success and FAILURE on failure.
3451  */
3452
3453 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3454                                 int bit_state)
3455 {
3456         int ret = FAILURE, cnt = 0, delay = 1;
3457         u64 val64;
3458
3459         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3460                 return FAILURE;
3461
3462         do {
3463                 val64 = readq(addr);
3464                 if (bit_state == S2IO_BIT_RESET) {
3465                         if (!(val64 & busy_bit)) {
3466                                 ret = SUCCESS;
3467                                 break;
3468                         }
3469                 } else {
3470                         if (!(val64 & busy_bit)) {
3471                                 ret = SUCCESS;
3472                                 break;
3473                         }
3474                 }
3475
3476                 if(in_interrupt())
3477                         mdelay(delay);
3478                 else
3479                         msleep(delay);
3480
3481                 if (++cnt >= 10)
3482                         delay = 50;
3483         } while (cnt < 20);
3484         return ret;
3485 }
3486 /*
3487  * check_pci_device_id - Checks if the device id is supported
3488  * @id : device id
3489  * Description: Function to check if the pci device id is supported by driver.
3490  * Return value: Actual device id if supported else PCI_ANY_ID
3491  */
3492 static u16 check_pci_device_id(u16 id)
3493 {
3494         switch (id) {
3495         case PCI_DEVICE_ID_HERC_WIN:
3496         case PCI_DEVICE_ID_HERC_UNI:
3497                 return XFRAME_II_DEVICE;
3498         case PCI_DEVICE_ID_S2IO_UNI:
3499         case PCI_DEVICE_ID_S2IO_WIN:
3500                 return XFRAME_I_DEVICE;
3501         default:
3502                 return PCI_ANY_ID;
3503         }
3504 }
3505
3506 /**
3507  *  s2io_reset - Resets the card.
3508  *  @sp : private member of the device structure.
3509  *  Description: Function to Reset the card. This function then also
3510  *  restores the previously saved PCI configuration space registers as
3511  *  the card reset also resets the configuration space.
3512  *  Return value:
3513  *  void.
3514  */
3515
3516 static void s2io_reset(struct s2io_nic * sp)
3517 {
3518         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3519         u64 val64;
3520         u16 subid, pci_cmd;
3521         int i;
3522         u16 val16;
3523         unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3524         unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3525
3526         DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3527                         __func__, sp->dev->name);
3528
3529         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3530         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3531
3532         val64 = SW_RESET_ALL;
3533         writeq(val64, &bar0->sw_reset);
3534         if (strstr(sp->product_name, "CX4")) {
3535                 msleep(750);
3536         }
3537         msleep(250);
3538         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3539
3540                 /* Restore the PCI state saved during initialization. */
3541                 pci_restore_state(sp->pdev);
3542                 pci_read_config_word(sp->pdev, 0x2, &val16);
3543                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3544                         break;
3545                 msleep(200);
3546         }
3547
3548         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3549                 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3550         }
3551
3552         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3553
3554         s2io_init_pci(sp);
3555
3556         /* Set swapper to enable I/O register access */
3557         s2io_set_swapper(sp);
3558
3559         /* restore mac_addr entries */
3560         do_s2io_restore_unicast_mc(sp);
3561
3562         /* Restore the MSIX table entries from local variables */
3563         restore_xmsi_data(sp);
3564
3565         /* Clear certain PCI/PCI-X fields after reset */
3566         if (sp->device_type == XFRAME_II_DEVICE) {
3567                 /* Clear "detected parity error" bit */
3568                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3569
3570                 /* Clearing PCIX Ecc status register */
3571                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3572
3573                 /* Clearing PCI_STATUS error reflected here */
3574                 writeq(s2BIT(62), &bar0->txpic_int_reg);
3575         }
3576
3577         /* Reset device statistics maintained by OS */
3578         memset(&sp->stats, 0, sizeof (struct net_device_stats));
3579
3580         up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3581         down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3582         up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3583         down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3584         reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3585         mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3586         mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3587         watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3588         /* save link up/down time/cnt, reset/memory/watchdog cnt */
3589         memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3590         /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3591         sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3592         sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3593         sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3594         sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3595         sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3596         sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3597         sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3598         sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3599
3600         /* SXE-002: Configure link and activity LED to turn it off */
3601         subid = sp->pdev->subsystem_device;
3602         if (((subid & 0xFF) >= 0x07) &&
3603             (sp->device_type == XFRAME_I_DEVICE)) {
3604                 val64 = readq(&bar0->gpio_control);
3605                 val64 |= 0x0000800000000000ULL;
3606                 writeq(val64, &bar0->gpio_control);
3607                 val64 = 0x0411040400000000ULL;
3608                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3609         }
3610
3611         /*
3612          * Clear spurious ECC interrupts that would have occured on
3613          * XFRAME II cards after reset.
3614          */
3615         if (sp->device_type == XFRAME_II_DEVICE) {
3616                 val64 = readq(&bar0->pcc_err_reg);
3617                 writeq(val64, &bar0->pcc_err_reg);
3618         }
3619
3620         sp->device_enabled_once = false;
3621 }
3622
3623 /**
3624  *  s2io_set_swapper - to set the swapper controle on the card
3625  *  @sp : private member of the device structure,
3626  *  pointer to the s2io_nic structure.
3627  *  Description: Function to set the swapper control on the card
3628  *  correctly depending on the 'endianness' of the system.
3629  *  Return value:
3630  *  SUCCESS on success and FAILURE on failure.
3631  */
3632
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		/*
		 * Feedback signature did not read back correctly; probe
		 * each candidate read-path swapper setting until it does.
		 */
		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No candidate produced the expected signature. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		/* Remember the working read-path bits for the write probe. */
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/*
	 * Verify the write path: write a known pattern to the XMSI
	 * address register and read it back.
	 */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		/*
		 * Probe write-path settings, keeping the read-path bits
		 * (valr) established above so the read-back stays valid.
		 */
		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the upper 16 bits of the probed setting; the
	 * per-path enables are rebuilt explicitly below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3760
3761 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3762 {
3763         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3764         u64 val64;
3765         int ret = 0, cnt = 0;
3766
3767         do {
3768                 val64 = readq(&bar0->xmsi_access);
3769                 if (!(val64 & s2BIT(15)))
3770                         break;
3771                 mdelay(1);
3772                 cnt++;
3773         } while(cnt < 5);
3774         if (cnt == 5) {
3775                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3776                 ret = 1;
3777         }
3778
3779         return ret;
3780 }
3781
3782 static void restore_xmsi_data(struct s2io_nic *nic)
3783 {
3784         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3785         u64 val64;
3786         int i, msix_index;
3787
3788
3789         if (nic->device_type == XFRAME_I_DEVICE)
3790                 return;
3791
3792         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3793                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3794                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3795                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3796                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3797                 writeq(val64, &bar0->xmsi_access);
3798                 if (wait_for_msix_trans(nic, msix_index)) {
3799                         DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3800                         continue;
3801                 }
3802         }
3803 }
3804
/*
 * Read each MSI-X vector's address/data pair out of the adapter's XMSI
 * table and cache it in nic->msix_info[] so restore_xmsi_data() can
 * re-program the table later.  Xframe I has no MSI-X, hence the early
 * return.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Slot 0 is the alarm vector; ring vectors occupy table
		 * slots 1, 9, 17, ... */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		/* s2BIT(15) starts the table access; wait_for_msix_trans()
		 * polls for the same bit to clear. */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache slots that hold a programmed vector. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3831
3832 static int s2io_enable_msi_x(struct s2io_nic *nic)
3833 {
3834         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3835         u64 rx_mat;
3836         u16 msi_control; /* Temp variable */
3837         int ret, i, j, msix_indx = 1;
3838
3839         nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3840                                GFP_KERNEL);
3841         if (!nic->entries) {
3842                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3843                         __func__);
3844                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3845                 return -ENOMEM;
3846         }
3847         nic->mac_control.stats_info->sw_stat.mem_allocated
3848                 += (nic->num_entries * sizeof(struct msix_entry));
3849
3850         memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3851
3852         nic->s2io_entries =
3853                 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3854                                    GFP_KERNEL);
3855         if (!nic->s2io_entries) {
3856                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3857                         __func__);
3858                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3859                 kfree(nic->entries);
3860                 nic->mac_control.stats_info->sw_stat.mem_freed
3861                         += (nic->num_entries * sizeof(struct msix_entry));
3862                 return -ENOMEM;
3863         }
3864          nic->mac_control.stats_info->sw_stat.mem_allocated
3865                 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3866         memset(nic->s2io_entries, 0,
3867                 nic->num_entries * sizeof(struct s2io_msix_entry));
3868
3869         nic->entries[0].entry = 0;
3870         nic->s2io_entries[0].entry = 0;
3871         nic->s2io_entries[0].in_use = MSIX_FLG;
3872         nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3873         nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3874
3875         for (i = 1; i < nic->num_entries; i++) {
3876                 nic->entries[i].entry = ((i - 1) * 8) + 1;
3877                 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3878                 nic->s2io_entries[i].arg = NULL;
3879                 nic->s2io_entries[i].in_use = 0;
3880         }
3881
3882         rx_mat = readq(&bar0->rx_mat);
3883         for (j = 0; j < nic->config.rx_ring_num; j++) {
3884                 rx_mat |= RX_MAT_SET(j, msix_indx);
3885                 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3886                 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3887                 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3888                 msix_indx += 8;
3889         }
3890         writeq(rx_mat, &bar0->rx_mat);
3891         readq(&bar0->rx_mat);
3892
3893         ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3894         /* We fail init if error or we get less vectors than min required */
3895         if (ret) {
3896                 DBG_PRINT(ERR_DBG, "s2io: Enabling MSI-X failed\n");
3897                 kfree(nic->entries);
3898                 nic->mac_control.stats_info->sw_stat.mem_freed
3899                         += (nic->num_entries * sizeof(struct msix_entry));
3900                 kfree(nic->s2io_entries);
3901                 nic->mac_control.stats_info->sw_stat.mem_freed
3902                         += (nic->num_entries * sizeof(struct s2io_msix_entry));
3903                 nic->entries = NULL;
3904                 nic->s2io_entries = NULL;
3905                 return -ENOMEM;
3906         }
3907
3908         /*
3909          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3910          * in the herc NIC. (Temp change, needs to be removed later)
3911          */
3912         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3913         msi_control |= 0x1; /* Enable MSI */
3914         pci_write_config_word(nic->pdev, 0x42, msi_control);
3915
3916         return 0;
3917 }
3918
3919 /* Handle software interrupt used during MSI(X) test */
3920 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3921 {
3922         struct s2io_nic *sp = dev_id;
3923
3924         sp->msi_detected = 1;
3925         wake_up(&sp->msi_wait);
3926
3927         return IRQ_HANDLED;
3928 }
3929
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Temporarily attach the test handler to the first ring vector. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled interrupt, steered to MSI vector 1;
	 * the original control value is saved for restoration below. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* s2io_test_intr() sets msi_detected and wakes us up. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduled-interrupt control value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3972
3973 static void remove_msix_isr(struct s2io_nic *sp)
3974 {
3975         int i;
3976         u16 msi_control;
3977
3978         for (i = 0; i < sp->num_entries; i++) {
3979                 if (sp->s2io_entries[i].in_use ==
3980                         MSIX_REGISTERED_SUCCESS) {
3981                         int vector = sp->entries[i].vector;
3982                         void *arg = sp->s2io_entries[i].arg;
3983                         free_irq(vector, arg);
3984                 }
3985         }
3986
3987         kfree(sp->entries);
3988         kfree(sp->s2io_entries);
3989         sp->entries = NULL;
3990         sp->s2io_entries = NULL;
3991
3992         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3993         msi_control &= 0xFFFE; /* Disable MSI */
3994         pci_write_config_word(sp->pdev, 0x42, msi_control);
3995
3996         pci_disable_msix(sp->pdev);
3997 }
3998
3999 static void remove_inta_isr(struct s2io_nic *sp)
4000 {
4001         struct net_device *dev = sp->dev;
4002
4003         free_irq(sp->pdev->irq, dev);
4004 }
4005
4006 /* ********************************************************* *
4007  * Functions defined below concern the OS part of the driver *
4008  * ********************************************************* */
4009
4010 /**
4011  *  s2io_open - open entry point of the driver
4012  *  @dev : pointer to the device structure.
4013  *  Description:
4014  *  This function is the open entry point of the driver. It mainly calls a
4015  *  function to allocate Rx buffers and inserts them into the buffer
4016  *  descriptors and then enables the Rx part of the NIC.
4017  *  Return value:
4018  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4019  *   file on failure.
4020  */
4021
4022 static int s2io_open(struct net_device *dev)
4023 {
4024         struct s2io_nic *sp = netdev_priv(dev);
4025         int err = 0;
4026
4027         /*
4028          * Make sure you have link off by default every time
4029          * Nic is initialized
4030          */
4031         netif_carrier_off(dev);
4032         sp->last_link_state = 0;
4033
4034         /* Initialize H/W and enable interrupts */
4035         err = s2io_card_up(sp);
4036         if (err) {
4037                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4038                           dev->name);
4039                 goto hw_init_failed;
4040         }
4041
4042         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4043                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4044                 s2io_card_down(sp);
4045                 err = -ENODEV;
4046                 goto hw_init_failed;
4047         }
4048         s2io_start_all_tx_queue(sp);
4049         return 0;
4050
4051 hw_init_failed:
4052         if (sp->config.intr_type == MSI_X) {
4053                 if (sp->entries) {
4054                         kfree(sp->entries);
4055                         sp->mac_control.stats_info->sw_stat.mem_freed
4056                         += (sp->num_entries * sizeof(struct msix_entry));
4057                 }
4058                 if (sp->s2io_entries) {
4059                         kfree(sp->s2io_entries);
4060                         sp->mac_control.stats_info->sw_stat.mem_freed
4061                         += (sp->num_entries * sizeof(struct s2io_msix_entry));
4062                 }
4063         }
4064         return err;
4065 }
4066
4067 /**
4068  *  s2io_close -close entry point of the driver
4069  *  @dev : device pointer.
4070  *  Description:
4071  *  This is the stop entry point of the driver. It needs to undo exactly
4072  *  whatever was done by the open entry point,thus it's usually referred to
4073  *  as the close function.Among other things this function mainly stops the
4074  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4075  *  Return value:
4076  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4077  *  file on failure.
4078  */
4079
4080 static int s2io_close(struct net_device *dev)
4081 {
4082         struct s2io_nic *sp = netdev_priv(dev);
4083         struct config_param *config = &sp->config;
4084         u64 tmp64;
4085         int offset;
4086
4087         /* Return if the device is already closed               *
4088         *  Can happen when s2io_card_up failed in change_mtu    *
4089         */
4090         if (!is_s2io_card_up(sp))
4091                 return 0;
4092
4093         s2io_stop_all_tx_queue(sp);
4094         /* delete all populated mac entries */
4095         for (offset = 1; offset < config->max_mc_addr; offset++) {
4096                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4097                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4098                         do_s2io_delete_unicast_mc(sp, tmp64);
4099         }
4100
4101         s2io_card_down(sp);
4102
4103         return 0;
4104 }
4105
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start
 *  variable will not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */
4118
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length skbs outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		/* Default steering: hash the L4 ports onto a fifo. */
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only first/unfragmented packets carry a usable
			 * L4 header. */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP path takes the fifo lock with
					 * trylock further below. */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* If the chosen queue is stopped, tell the stack to retry. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in the offload (LSO/checksum) bits of descriptor 0. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: descriptor 0 carries an 8-byte in-band fragment
		 * id; the packet data starts at the next descriptor. */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the completed descriptor list to the hardware fifo. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before the lock is released. */
	mmiowb();

	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4346
4347 static void
4348 s2io_alarm_handle(unsigned long data)
4349 {
4350         struct s2io_nic *sp = (struct s2io_nic *)data;
4351         struct net_device *dev = sp->dev;
4352
4353         s2io_handle_errors(dev);
4354         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4355 }
4356
4357 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4358 {
4359         struct ring_info *ring = (struct ring_info *)dev_id;
4360         struct s2io_nic *sp = ring->nic;
4361         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4362
4363         if (unlikely(!is_s2io_card_up(sp)))
4364                 return IRQ_HANDLED;
4365
4366         if (sp->config.napi) {
4367                 u8 __iomem *addr = NULL;
4368                 u8 val8 = 0;
4369
4370                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4371                 addr += (7 - ring->ring_no);
4372                 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4373                 writeb(val8, addr);
4374                 val8 = readb(addr);
4375                 napi_schedule(&ring->napi);
4376         } else {
4377                 rx_intr_handler(ring, 0);
4378                 s2io_chk_rx_buffers(sp, ring);
4379         }
4380
4381         return IRQ_HANDLED;
4382 }
4383
/*
 * MSI-X handler for the alarm/TX vector (dev_id is the fifos array):
 * services TXPIC events and completed TX descriptors on every fifo.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while this one is serviced. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			/* Acknowledge the TX traffic interrupt. */
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the interrupt mask and flush the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4420
/*
 * s2io_txpic_intr_handle - service TxPIC (GPIO link-state) interrupts.
 * @sp: device private structure.
 *
 * Reads pic_int_status and, on a GPIO interrupt, resolves the link
 * up/down indication: an ambiguous both-bits-set state is cleared and
 * both interrupts are unmasked for re-evaluation; a clean link-up
 * enables the adapter and LED; a clean link-down turns the LED off.
 * In each case the opposite direction's interrupt is unmasked so the
 * next transition is seen.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			/* write-back acknowledges/clears both GPIO bits */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/*
			 * adapter_status is read but the value is discarded;
			 * presumably a readback to latch/flush state — TODO
			 * confirm against hardware documentation.
			 */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* final read flushes the posted mask writes */
	val64 = readq(&bar0->gpio_int_mask);
}
4481
4482 /**
4483  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4484  *  @value: alarm bits
4485  *  @addr: address value
4486  *  @cnt: counter variable
4487  *  Description: Check for alarm and increment the counter
4488  *  Return Value:
4489  *  1 - if alarm bit set
4490  *  0 - if alarm bit is not set
4491  */
4492 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4493                           unsigned long long *cnt)
4494 {
4495         u64 val64;
4496         val64 = readq(addr);
4497         if ( val64 & value ) {
4498                 writeq(val64, addr);
4499                 (*cnt)++;
4500                 return 1;
4501         }
4502         return 0;
4503
4504 }
4505
/**
 *  s2io_handle_errors - Xframe error indication handler
 *  @dev_id: opaque pointer to the net_device of the NIC
 *  Description: Handle alarms such as loss of link, single or
 *  double ECC errors, critical and serious errors.  Each alarm source
 *  is checked via do_s2io_chk_alarm_bit(); fatal conditions jump to
 *  the reset label, which schedules a soft reset of the device.
 *  Return Value:
 *  NONE
 */
static void s2io_handle_errors(void * dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0,val64=0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	memset(&sw_stat->ring_full_cnt, 0,
		sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if(stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				&sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				&sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* each 64-bit bump counter packs four 16-bit per-ring counts */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			 sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	/* --- Tx DMA alarm sources --- */
	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
				PFC_PCIX_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
				&sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
				| PCC_N_SERR | PCC_6_COF_OV_ERR
				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
				&sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
				&sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
			&sw_stat->sm_err_cnt))
			goto reset;
	}

	/* --- Tx MAC alarm sources --- */
	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt);
	}

	/* --- Tx XGXS alarm sources --- */
	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt);
	}

	/* --- Rx DMA alarm sources --- */
	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				&sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
				| RDA_MISC_ERR | RDA_PCIX_ERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
				&sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
	}

	/* --- Rx MAC alarm sources --- */
	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
				&bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt);
	}

	/* --- Rx XGXS alarm sources --- */
	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
				&bar0->xgxs_rxgxs_err_reg,
				&sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	/* --- Memory controller alarm sources --- */
	val64 = readq(&bar0->mc_int_status);
	if(val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
				&sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
								goto reset;
					}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal alarm: stop Tx and schedule a soft reset of the card */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4767
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask all interrupts while this interrupt is serviced */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Rx is polled from NAPI context; mask and
				 * acknowledge Rx traffic interrupts here. */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask and flush the posted writes */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4880
4881 /**
4882  * s2io_updt_stats -
4883  */
4884 static void s2io_updt_stats(struct s2io_nic *sp)
4885 {
4886         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4887         u64 val64;
4888         int cnt = 0;
4889
4890         if (is_s2io_card_up(sp)) {
4891                 /* Apprx 30us on a 133 MHz bus */
4892                 val64 = SET_UPDT_CLICKS(10) |
4893                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4894                 writeq(val64, &bar0->stat_cfg);
4895                 do {
4896                         udelay(100);
4897                         val64 = readq(&bar0->stat_cfg);
4898                         if (!(val64 & s2BIT(0)))
4899                                 break;
4900                         cnt++;
4901                         if (cnt == 5)
4902                                 break; /* Updt failed */
4903                 } while(1);
4904         }
4905 }
4906
4907 /**
4908  *  s2io_get_stats - Updates the device statistics structure.
4909  *  @dev : pointer to the device structure.
4910  *  Description:
4911  *  This function updates the device statistics structure in the s2io_nic
4912  *  structure and returns a pointer to the same.
4913  *  Return value:
4914  *  pointer to the updated net_device_stats structure.
4915  */
4916
4917 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4918 {
4919         struct s2io_nic *sp = netdev_priv(dev);
4920         struct mac_info *mac_control;
4921         struct config_param *config;
4922         int i;
4923
4924
4925         mac_control = &sp->mac_control;
4926         config = &sp->config;
4927
4928         /* Configure Stats for immediate updt */
4929         s2io_updt_stats(sp);
4930
4931         /* Using sp->stats as a staging area, because reset (due to mtu
4932            change, for example) will clear some hardware counters */
4933         dev->stats.tx_packets +=
4934                 le32_to_cpu(mac_control->stats_info->tmac_frms) - 
4935                 sp->stats.tx_packets;
4936         sp->stats.tx_packets =
4937                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4938         dev->stats.tx_errors +=
4939                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4940                 sp->stats.tx_errors;
4941         sp->stats.tx_errors =
4942                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4943         dev->stats.rx_errors +=
4944                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4945                 sp->stats.rx_errors;
4946         sp->stats.rx_errors =
4947                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4948         dev->stats.multicast =
4949                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) - 
4950                 sp->stats.multicast;
4951         sp->stats.multicast =
4952                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4953         dev->stats.rx_length_errors =
4954                 le64_to_cpu(mac_control->stats_info->rmac_long_frms) - 
4955                 sp->stats.rx_length_errors;
4956         sp->stats.rx_length_errors =
4957                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4958
4959         /* collect per-ring rx_packets and rx_bytes */
4960         dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4961         for (i = 0; i < config->rx_ring_num; i++) {
4962                 struct ring_info *ring = &mac_control->rings[i];
4963
4964                 dev->stats.rx_packets += ring->rx_packets;
4965                 dev->stats.rx_bytes += ring->rx_bytes;
4966         }
4967
4968         return (&dev->stats);
4969 }
4970
4971 /**
4972  *  s2io_set_multicast - entry point for multicast address enable/disable.
4973  *  @dev : pointer to the device structure
4974  *  Description:
4975  *  This function is a driver entry point which gets called by the kernel
4976  *  whenever multicast addresses must be enabled/disabled. This also gets
4977  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4978  *  determine, if multicast address must be enabled or if promiscuous mode
4979  *  is to be disabled etc.
4980  *  Return value:
4981  *  void.
4982  */
4983
4984 static void s2io_set_multicast(struct net_device *dev)
4985 {
4986         int i, j, prev_cnt;
4987         struct dev_mc_list *mclist;
4988         struct s2io_nic *sp = netdev_priv(dev);
4989         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4990         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4991             0xfeffffffffffULL;
4992         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4993         void __iomem *add;
4994         struct config_param *config = &sp->config;
4995
4996         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4997                 /*  Enable all Multicast addresses */
4998                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4999                        &bar0->rmac_addr_data0_mem);
5000                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5001                        &bar0->rmac_addr_data1_mem);
5002                 val64 = RMAC_ADDR_CMD_MEM_WE |
5003                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5004                     RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5005                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5006                 /* Wait till command completes */
5007                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5008                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5009                                         S2IO_BIT_RESET);
5010
5011                 sp->m_cast_flg = 1;
5012                 sp->all_multi_pos = config->max_mc_addr - 1;
5013         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5014                 /*  Disable all Multicast addresses */
5015                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5016                        &bar0->rmac_addr_data0_mem);
5017                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5018                        &bar0->rmac_addr_data1_mem);
5019                 val64 = RMAC_ADDR_CMD_MEM_WE |
5020                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5021                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5022                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5023                 /* Wait till command completes */
5024                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5025                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5026                                         S2IO_BIT_RESET);
5027
5028                 sp->m_cast_flg = 0;
5029                 sp->all_multi_pos = 0;
5030         }
5031
5032         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5033                 /*  Put the NIC into promiscuous mode */
5034                 add = &bar0->mac_cfg;
5035                 val64 = readq(&bar0->mac_cfg);
5036                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5037
5038                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5039                 writel((u32) val64, add);
5040                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5041                 writel((u32) (val64 >> 32), (add + 4));
5042
5043                 if (vlan_tag_strip != 1) {
5044                         val64 = readq(&bar0->rx_pa_cfg);
5045                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5046                         writeq(val64, &bar0->rx_pa_cfg);
5047                         sp->vlan_strip_flag = 0;
5048                 }
5049
5050                 val64 = readq(&bar0->mac_cfg);
5051                 sp->promisc_flg = 1;
5052                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5053                           dev->name);
5054         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5055                 /*  Remove the NIC from promiscuous mode */
5056                 add = &bar0->mac_cfg;
5057                 val64 = readq(&bar0->mac_cfg);
5058                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5059
5060                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5061                 writel((u32) val64, add);
5062                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5063                 writel((u32) (val64 >> 32), (add + 4));
5064
5065                 if (vlan_tag_strip != 0) {
5066                         val64 = readq(&bar0->rx_pa_cfg);
5067                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5068                         writeq(val64, &bar0->rx_pa_cfg);
5069                         sp->vlan_strip_flag = 1;
5070                 }
5071
5072                 val64 = readq(&bar0->mac_cfg);
5073                 sp->promisc_flg = 0;
5074                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5075                           dev->name);
5076         }
5077
5078         /*  Update individual M_CAST address list */
5079         if ((!sp->m_cast_flg) && dev->mc_count) {
5080                 if (dev->mc_count >
5081                     (config->max_mc_addr - config->max_mac_addr)) {
5082                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5083                                   dev->name);
5084                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5085                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5086                         return;
5087                 }
5088
5089                 prev_cnt = sp->mc_addr_count;
5090                 sp->mc_addr_count = dev->mc_count;
5091
5092                 /* Clear out the previous list of Mc in the H/W. */
5093                 for (i = 0; i < prev_cnt; i++) {
5094                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5095                                &bar0->rmac_addr_data0_mem);
5096                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5097                                 &bar0->rmac_addr_data1_mem);
5098                         val64 = RMAC_ADDR_CMD_MEM_WE |
5099                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5100                             RMAC_ADDR_CMD_MEM_OFFSET
5101                             (config->mc_start_offset + i);
5102                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5103
5104                         /* Wait for command completes */
5105                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5106                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5107                                         S2IO_BIT_RESET)) {
5108                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5109                                           dev->name);
5110                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5111                                 return;
5112                         }
5113                 }
5114
5115                 /* Create the new Rx filter list and update the same in H/W. */
5116                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5117                      i++, mclist = mclist->next) {
5118                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5119                                ETH_ALEN);
5120                         mac_addr = 0;
5121                         for (j = 0; j < ETH_ALEN; j++) {
5122                                 mac_addr |= mclist->dmi_addr[j];
5123                                 mac_addr <<= 8;
5124                         }
5125                         mac_addr >>= 8;
5126                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5127                                &bar0->rmac_addr_data0_mem);
5128                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5129                                 &bar0->rmac_addr_data1_mem);
5130                         val64 = RMAC_ADDR_CMD_MEM_WE |
5131                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5132                             RMAC_ADDR_CMD_MEM_OFFSET
5133                             (i + config->mc_start_offset);
5134                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5135
5136                         /* Wait for command completes */
5137                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5138                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5139                                         S2IO_BIT_RESET)) {
5140                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5141                                           dev->name);
5142                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5143                                 return;
5144                         }
5145                 }
5146         }
5147 }
5148
5149 /* read from CAM unicast & multicast addresses and store it in
5150  * def_mac_addr structure
5151  */
5152 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5153 {
5154         int offset;
5155         u64 mac_addr = 0x0;
5156         struct config_param *config = &sp->config;
5157
5158         /* store unicast & multicast mac addresses */
5159         for (offset = 0; offset < config->max_mc_addr; offset++) {
5160                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5161                 /* if read fails disable the entry */
5162                 if (mac_addr == FAILURE)
5163                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5164                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5165         }
5166 }
5167
5168 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5169 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5170 {
5171         int offset;
5172         struct config_param *config = &sp->config;
5173         /* restore unicast mac address */
5174         for (offset = 0; offset < config->max_mac_addr; offset++)
5175                 do_s2io_prog_unicast(sp->dev,
5176                         sp->def_mac_addr[offset].mac_addr);
5177
5178         /* restore multicast mac address */
5179         for (offset = config->mc_start_offset;
5180                 offset < config->max_mc_addr; offset++)
5181                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5182 }
5183
5184 /* add a multicast MAC address to CAM */
5185 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5186 {
5187         int i;
5188         u64 mac_addr = 0;
5189         struct config_param *config = &sp->config;
5190
5191         for (i = 0; i < ETH_ALEN; i++) {
5192                 mac_addr <<= 8;
5193                 mac_addr |= addr[i];
5194         }
5195         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5196                 return SUCCESS;
5197
5198         /* check if the multicast mac already preset in CAM */
5199         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5200                 u64 tmp64;
5201                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5202                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5203                         break;
5204
5205                 if (tmp64 == mac_addr)
5206                         return SUCCESS;
5207         }
5208         if (i == config->max_mc_addr) {
5209                 DBG_PRINT(ERR_DBG,
5210                         "CAM full no space left for multicast MAC\n");
5211                 return FAILURE;
5212         }
5213         /* Update the internal structure with this new mac address */
5214         do_s2io_copy_mac_addr(sp, i, mac_addr);
5215
5216         return (do_s2io_add_mac(sp, mac_addr, i));
5217 }
5218
5219 /* add MAC address to CAM */
5220 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5221 {
5222         u64 val64;
5223         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5224
5225         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5226                 &bar0->rmac_addr_data0_mem);
5227
5228         val64 =
5229                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5230                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5231         writeq(val64, &bar0->rmac_addr_cmd_mem);
5232
5233         /* Wait till command completes */
5234         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5235                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5236                 S2IO_BIT_RESET)) {
5237                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5238                 return FAILURE;
5239         }
5240         return SUCCESS;
5241 }
5242 /* deletes a specified unicast/multicast mac entry from CAM */
5243 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5244 {
5245         int offset;
5246         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5247         struct config_param *config = &sp->config;
5248
5249         for (offset = 1;
5250                 offset < config->max_mc_addr; offset++) {
5251                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5252                 if (tmp64 == addr) {
5253                         /* disable the entry by writing  0xffffffffffffULL */
5254                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5255                                 return FAILURE;
5256                         /* store the new mac list from CAM */
5257                         do_s2io_store_unicast_mc(sp);
5258                         return SUCCESS;
5259                 }
5260         }
5261         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5262                         (unsigned long long)addr);
5263         return FAILURE;
5264 }
5265
5266 /* read mac entries from CAM */
5267 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5268 {
5269         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5270         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5271
5272         /* read mac addr */
5273         val64 =
5274                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5275                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5276         writeq(val64, &bar0->rmac_addr_cmd_mem);
5277
5278         /* Wait till command completes */
5279         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5280                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5281                 S2IO_BIT_RESET)) {
5282                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5283                 return FAILURE;
5284         }
5285         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5286         return (tmp64 >> 16);
5287 }
5288
5289 /**
5290  * s2io_set_mac_addr driver entry point
5291  */
5292
5293 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5294 {
5295         struct sockaddr *addr = p;
5296
5297         if (!is_valid_ether_addr(addr->sa_data))
5298                 return -EINVAL;
5299
5300         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5301
5302         /* store the MAC address in CAM */
5303         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5304 }
5305 /**
5306  *  do_s2io_prog_unicast - Programs the Xframe mac address
5307  *  @dev : pointer to the device structure.
5308  *  @addr: a uchar pointer to the new mac address which is to be set.
5309  *  Description : This procedure will program the Xframe to receive
5310  *  frames with new Mac Address
5311  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5312  *  as defined in errno.h file on failure.
5313  */
5314
5315 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5316 {
5317         struct s2io_nic *sp = netdev_priv(dev);
5318         register u64 mac_addr = 0, perm_addr = 0;
5319         int i;
5320         u64 tmp64;
5321         struct config_param *config = &sp->config;
5322
5323         /*
5324         * Set the new MAC address as the new unicast filter and reflect this
5325         * change on the device address registered with the OS. It will be
5326         * at offset 0.
5327         */
5328         for (i = 0; i < ETH_ALEN; i++) {
5329                 mac_addr <<= 8;
5330                 mac_addr |= addr[i];
5331                 perm_addr <<= 8;
5332                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5333         }
5334
5335         /* check if the dev_addr is different than perm_addr */
5336         if (mac_addr == perm_addr)
5337                 return SUCCESS;
5338
5339         /* check if the mac already preset in CAM */
5340         for (i = 1; i < config->max_mac_addr; i++) {
5341                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5342                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5343                         break;
5344
5345                 if (tmp64 == mac_addr) {
5346                         DBG_PRINT(INFO_DBG,
5347                         "MAC addr:0x%llx already present in CAM\n",
5348                         (unsigned long long)mac_addr);
5349                         return SUCCESS;
5350                 }
5351         }
5352         if (i == config->max_mac_addr) {
5353                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5354                 return FAILURE;
5355         }
5356         /* Update the internal structure with this new mac address */
5357         do_s2io_copy_mac_addr(sp, i, mac_addr);
5358         return (do_s2io_add_mac(sp, mac_addr, i));
5359 }
5360
5361 /**
5362  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
5364  * @info: pointer to the structure with parameters given by ethtool to set
5365  * link information.
5366  * Description:
5367  * The function sets different link parameters provided by the user onto
5368  * the NIC.
5369  * Return value:
5370  * 0 on success.
5371 */
5372
5373 static int s2io_ethtool_sset(struct net_device *dev,
5374                              struct ethtool_cmd *info)
5375 {
5376         struct s2io_nic *sp = netdev_priv(dev);
5377         if ((info->autoneg == AUTONEG_ENABLE) ||
5378             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5379                 return -EINVAL;
5380         else {
5381                 s2io_close(sp->dev);
5382                 s2io_open(sp->dev);
5383         }
5384
5385         return 0;
5386 }
5387
5388 /**
 * s2io_ethtool_gset - Return link specific information.
5390  * @sp : private member of the device structure, pointer to the
5391  *      s2io_nic structure.
5392  * @info : pointer to the structure with parameters given by ethtool
5393  * to return link information.
5394  * Description:
5395  * Returns link specific information like speed, duplex etc.. to ethtool.
5396  * Return value :
5397  * return 0 on success.
5398  */
5399
5400 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5401 {
5402         struct s2io_nic *sp = netdev_priv(dev);
5403         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5404         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5405         info->port = PORT_FIBRE;
5406
5407         /* info->transceiver */
5408         info->transceiver = XCVR_EXTERNAL;
5409
5410         if (netif_carrier_ok(sp->dev)) {
5411                 info->speed = 10000;
5412                 info->duplex = DUPLEX_FULL;
5413         } else {
5414                 info->speed = -1;
5415                 info->duplex = -1;
5416         }
5417
5418         info->autoneg = AUTONEG_DISABLE;
5419         return 0;
5420 }
5421
5422 /**
5423  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5424  * @sp : private member of the device structure, which is a pointer to the
5425  * s2io_nic structure.
5426  * @info : pointer to the structure with parameters given by ethtool to
5427  * return driver information.
5428  * Description:
5429  * Returns driver specefic information like name, version etc.. to ethtool.
5430  * Return value:
5431  *  void
5432  */
5433
5434 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5435                                   struct ethtool_drvinfo *info)
5436 {
5437         struct s2io_nic *sp = netdev_priv(dev);
5438
5439         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5440         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5441         strncpy(info->fw_version, "", sizeof(info->fw_version));
5442         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5443         info->regdump_len = XENA_REG_SPACE;
5444         info->eedump_len = XENA_EEPROM_SPACE;
5445 }
5446
5447 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5449  *  @sp: private member of the device structure, which is a pointer to the
5450  *  s2io_nic structure.
5451  *  @regs : pointer to the structure with parameters given by ethtool for
5452  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5454  *  Description:
5455  *  Dumps the entire register space of xFrame NIC into the user given
5456  *  buffer area.
5457  * Return value :
5458  * void .
5459 */
5460
5461 static void s2io_ethtool_gregs(struct net_device *dev,
5462                                struct ethtool_regs *regs, void *space)
5463 {
5464         int i;
5465         u64 reg;
5466         u8 *reg_space = (u8 *) space;
5467         struct s2io_nic *sp = netdev_priv(dev);
5468
5469         regs->len = XENA_REG_SPACE;
5470         regs->version = sp->pdev->subsystem_device;
5471
5472         for (i = 0; i < regs->len; i += 8) {
5473                 reg = readq(sp->bar0 + i);
5474                 memcpy((reg_space + i), &reg, 8);
5475         }
5476 }
5477
5478 /**
5479  *  s2io_phy_id  - timer function that alternates adapter LED.
5480  *  @data : address of the private member of the device structure, which
5481  *  is a pointer to the s2io_nic structure, provided as an u32.
5482  * Description: This is actually the timer function that alternates the
5483  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5485  *  once every second.
5486 */
5487 static void s2io_phy_id(unsigned long data)
5488 {
5489         struct s2io_nic *sp = (struct s2io_nic *) data;
5490         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5491         u64 val64 = 0;
5492         u16 subid;
5493
5494         subid = sp->pdev->subsystem_device;
5495         if ((sp->device_type == XFRAME_II_DEVICE) ||
5496                    ((subid & 0xFF) >= 0x07)) {
5497                 val64 = readq(&bar0->gpio_control);
5498                 val64 ^= GPIO_CTRL_GPIO_0;
5499                 writeq(val64, &bar0->gpio_control);
5500         } else {
5501                 val64 = readq(&bar0->adapter_control);
5502                 val64 ^= ADAPTER_LED_ON;
5503                 writeq(val64, &bar0->adapter_control);
5504         }
5505
5506         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5507 }
5508
5509 /**
5510  * s2io_ethtool_idnic - To physically identify the nic on the system.
5511  * @sp : private member of the device structure, which is a pointer to the
5512  * s2io_nic structure.
5513  * @id : pointer to the structure with identification parameters given by
5514  * ethtool.
5515  * Description: Used to physically identify the NIC on the system.
5516  * The Link LED will blink for a time specified by the user for
5517  * identification.
5518  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5519  * identification is possible only if it's link is up.
5520  * Return value:
5521  * int , returns 0 on success
5522  */
5523
5524 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5525 {
5526         u64 val64 = 0, last_gpio_ctrl_val;
5527         struct s2io_nic *sp = netdev_priv(dev);
5528         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5529         u16 subid;
5530
5531         subid = sp->pdev->subsystem_device;
5532         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5533         if ((sp->device_type == XFRAME_I_DEVICE) &&
5534                 ((subid & 0xFF) < 0x07)) {
5535                 val64 = readq(&bar0->adapter_control);
5536                 if (!(val64 & ADAPTER_CNTL_EN)) {
5537                         printk(KERN_ERR
5538                                "Adapter Link down, cannot blink LED\n");
5539                         return -EFAULT;
5540                 }
5541         }
5542         if (sp->id_timer.function == NULL) {
5543                 init_timer(&sp->id_timer);
5544                 sp->id_timer.function = s2io_phy_id;
5545                 sp->id_timer.data = (unsigned long) sp;
5546         }
5547         mod_timer(&sp->id_timer, jiffies);
5548         if (data)
5549                 msleep_interruptible(data * HZ);
5550         else
5551                 msleep_interruptible(MAX_FLICKER_TIME);
5552         del_timer_sync(&sp->id_timer);
5553
5554         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5555                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5556                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5557         }
5558
5559         return 0;
5560 }
5561
5562 static void s2io_ethtool_gringparam(struct net_device *dev,
5563                                     struct ethtool_ringparam *ering)
5564 {
5565         struct s2io_nic *sp = netdev_priv(dev);
5566         int i,tx_desc_count=0,rx_desc_count=0;
5567
5568         if (sp->rxd_mode == RXD_MODE_1)
5569                 ering->rx_max_pending = MAX_RX_DESC_1;
5570         else if (sp->rxd_mode == RXD_MODE_3B)
5571                 ering->rx_max_pending = MAX_RX_DESC_2;
5572
5573         ering->tx_max_pending = MAX_TX_DESC;
5574         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5575                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5576
5577         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5578         ering->tx_pending = tx_desc_count;
5579         rx_desc_count = 0;
5580         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5581                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5582
5583         ering->rx_pending = rx_desc_count;
5584
5585         ering->rx_mini_max_pending = 0;
5586         ering->rx_mini_pending = 0;
5587         if(sp->rxd_mode == RXD_MODE_1)
5588                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5589         else if (sp->rxd_mode == RXD_MODE_3B)
5590                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5591         ering->rx_jumbo_pending = rx_desc_count;
5592 }
5593
5594 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5596  * @sp : private member of the device structure, which is a pointer to the
5597  *      s2io_nic structure.
5598  * @ep : pointer to the structure with pause parameters given by ethtool.
5599  * Description:
5600  * Returns the Pause frame generation and reception capability of the NIC.
5601  * Return value:
5602  *  void
5603  */
5604 static void s2io_ethtool_getpause_data(struct net_device *dev,
5605                                        struct ethtool_pauseparam *ep)
5606 {
5607         u64 val64;
5608         struct s2io_nic *sp = netdev_priv(dev);
5609         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5610
5611         val64 = readq(&bar0->rmac_pause_cfg);
5612         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5613                 ep->tx_pause = true;
5614         if (val64 & RMAC_PAUSE_RX_ENABLE)
5615                 ep->rx_pause = true;
5616         ep->autoneg = false;
5617 }
5618
5619 /**
5620  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5621  * @sp : private member of the device structure, which is a pointer to the
5622  *      s2io_nic structure.
5623  * @ep : pointer to the structure with pause parameters given by ethtool.
5624  * Description:
5625  * It can be used to set or reset Pause frame generation or reception
5626  * support of the NIC.
5627  * Return value:
5628  * int, returns 0 on Success
5629  */
5630
5631 static int s2io_ethtool_setpause_data(struct net_device *dev,
5632                                struct ethtool_pauseparam *ep)
5633 {
5634         u64 val64;
5635         struct s2io_nic *sp = netdev_priv(dev);
5636         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5637
5638         val64 = readq(&bar0->rmac_pause_cfg);
5639         if (ep->tx_pause)
5640                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5641         else
5642                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5643         if (ep->rx_pause)
5644                 val64 |= RMAC_PAUSE_RX_ENABLE;
5645         else
5646                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5647         writeq(val64, &bar0->rmac_pause_cfg);
5648         return 0;
5649 }
5650
5651 /**
5652  * read_eeprom - reads 4 bytes of data from user given offset.
5653  * @sp : private member of the device structure, which is a pointer to the
5654  *      s2io_nic structure.
5655  * @off : offset at which the data must be written
5656  * @data : Its an output parameter where the data read at the given
5657  *      offset is stored.
5658  * Description:
5659  * Will read 4 bytes of data from the user given offset and return the
5660  * read data.
5661  * NOTE: Will allow to read only part of the EEPROM visible through the
5662  *   I2C bus.
5663  * Return value:
5664  *  -1 on failure and 0 on success.
5665  */
5666
/* I2C device id of the on-board serial EEPROM (Xframe I). */
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/*
	 * Xframe I exposes the EEPROM over the I2C controller: kick off a
	 * 4-byte read at @off, then poll (up to 5 x 50ms) for completion.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* Data is returned in the control register. */
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/*
	 * Xframe II uses the SPI controller instead.  The command register
	 * is written twice: first the command, then the same value with the
	 * REQ bit set to start the transfer.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the command. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 3 bytes are valid data. */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* -1: timeout, 1: SPI NACK, 0: success. */
	return ret;
}
5717
5718 /**
5719  *  write_eeprom - actually writes the relevant part of the data value.
5720  *  @sp : private member of the device structure, which is a pointer to the
5721  *       s2io_nic structure.
5722  *  @off : offset at which the data must be written
5723  *  @data : The data that is to be written
5724  *  @cnt : Number of bytes of the data that are actually to be written into
5725  *  the Eeprom. (max of 3)
5726  * Description:
5727  *  Actually writes the relevant part of the data value into the Eeprom
5728  *  through the I2C bus.
5729  * Return value:
5730  *  0 on success, -1 on failure.
5731  */
5732
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/*
	 * Xframe I: program @cnt bytes of @data at @off through the I2C
	 * controller, then poll (up to 5 x 50ms) for the END flag; NACK
	 * means the device refused the write.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/*
	 * Xframe II: the SPI byte-count field encodes 8 bytes as 0.  Data
	 * goes into the data register first, then the command is written
	 * twice - once to set it up, once with REQ to start the transfer.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the command. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	/* -1: timeout, 1: SPI NACK, 0: success. */
	return ret;
}
5782 static void s2io_vpd_read(struct s2io_nic *nic)
5783 {
5784         u8 *vpd_data;
5785         u8 data;
5786         int i=0, cnt, fail = 0;
5787         int vpd_addr = 0x80;
5788
5789         if (nic->device_type == XFRAME_II_DEVICE) {
5790                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5791                 vpd_addr = 0x80;
5792         }
5793         else {
5794                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5795                 vpd_addr = 0x50;
5796         }
5797         strcpy(nic->serial_num, "NOT AVAILABLE");
5798
5799         vpd_data = kmalloc(256, GFP_KERNEL);
5800         if (!vpd_data) {
5801                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5802                 return;
5803         }
5804         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5805
5806         for (i = 0; i < 256; i +=4 ) {
5807                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5808                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5809                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5810                 for (cnt = 0; cnt <5; cnt++) {
5811                         msleep(2);
5812                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5813                         if (data == 0x80)
5814                                 break;
5815                 }
5816                 if (cnt >= 5) {
5817                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5818                         fail = 1;
5819                         break;
5820                 }
5821                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5822                                       (u32 *)&vpd_data[i]);
5823         }
5824
5825         if(!fail) {
5826                 /* read serial number of adapter */
5827                 for (cnt = 0; cnt < 256; cnt++) {
5828                 if ((vpd_data[cnt] == 'S') &&
5829                         (vpd_data[cnt+1] == 'N') &&
5830                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5831                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5832                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5833                                         vpd_data[cnt+2]);
5834                                 break;
5835                         }
5836                 }
5837         }
5838
5839         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5840                 memset(nic->product_name, 0, vpd_data[1]);
5841                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5842         }
5843         kfree(vpd_data);
5844         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5845 }
5846
5847 /**
5848  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5850  *  @eeprom : pointer to the user level structure provided by ethtool,
5851  *  containing all relevant information.
5852  *  @data_buf : user defined value to be written into Eeprom.
5853  *  Description: Reads the values stored in the Eeprom at given offset
5854  *  for a given length. Stores these values int the input argument data
5855  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5856  *  Return value:
5857  *  int  0 on success
5858  */
5859
5860 static int s2io_ethtool_geeprom(struct net_device *dev,
5861                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5862 {
5863         u32 i, valid;
5864         u64 data;
5865         struct s2io_nic *sp = netdev_priv(dev);
5866
5867         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5868
5869         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5870                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5871
5872         for (i = 0; i < eeprom->len; i += 4) {
5873                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5874                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5875                         return -EFAULT;
5876                 }
5877                 valid = INV(data);
5878                 memcpy((data_buf + i), &valid, 4);
5879         }
5880         return 0;
5881 }
5882
5883 /**
5884  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5885  *  @sp : private member of the device structure, which is a pointer to the
5886  *  s2io_nic structure.
5887  *  @eeprom : pointer to the user level structure provided by ethtool,
5888  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
5890  *  Description:
5891  *  Tries to write the user provided value in the Eeprom, at the offset
5892  *  given by the user.
5893  *  Return value:
5894  *  0 on success, -EFAULT on failure.
5895  */
5896
5897 static int s2io_ethtool_seeprom(struct net_device *dev,
5898                                 struct ethtool_eeprom *eeprom,
5899                                 u8 * data_buf)
5900 {
5901         int len = eeprom->len, cnt = 0;
5902         u64 valid = 0, data;
5903         struct s2io_nic *sp = netdev_priv(dev);
5904
5905         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5906                 DBG_PRINT(ERR_DBG,
5907                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5908                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5909                           eeprom->magic);
5910                 return -EFAULT;
5911         }
5912
5913         while (len) {
5914                 data = (u32) data_buf[cnt] & 0x000000FF;
5915                 if (data) {
5916                         valid = (u32) (data << 24);
5917                 } else
5918                         valid = data;
5919
5920                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5921                         DBG_PRINT(ERR_DBG,
5922                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5923                         DBG_PRINT(ERR_DBG,
5924                                   "write into the specified offset\n");
5925                         return -EFAULT;
5926                 }
5927                 cnt++;
5928                 len--;
5929         }
5930
5931         return 0;
5932 }
5933
5934 /**
5935  * s2io_register_test - reads and writes into all clock domains.
5936  * @sp : private member of the device structure, which is a pointer to the
5937  * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
5940  * Description:
5941  * Read and write into all clock domains. The NIC has 3 clock domains,
5942  * see that registers in all the three regions are accessible.
5943  * Return value:
5944  * 0 on success.
5945  */
5946
5947 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5948 {
5949         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5950         u64 val64 = 0, exp_val;
5951         int fail = 0;
5952
5953         val64 = readq(&bar0->pif_rd_swapper_fb);
5954         if (val64 != 0x123456789abcdefULL) {
5955                 fail = 1;
5956                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5957         }
5958
5959         val64 = readq(&bar0->rmac_pause_cfg);
5960         if (val64 != 0xc000ffff00000000ULL) {
5961                 fail = 1;
5962                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5963         }
5964
5965         val64 = readq(&bar0->rx_queue_cfg);
5966         if (sp->device_type == XFRAME_II_DEVICE)
5967                 exp_val = 0x0404040404040404ULL;
5968         else
5969                 exp_val = 0x0808080808080808ULL;
5970         if (val64 != exp_val) {
5971                 fail = 1;
5972                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5973         }
5974
5975         val64 = readq(&bar0->xgxs_efifo_cfg);
5976         if (val64 != 0x000000001923141EULL) {
5977                 fail = 1;
5978                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5979         }
5980
5981         val64 = 0x5A5A5A5A5A5A5A5AULL;
5982         writeq(val64, &bar0->xmsi_data);
5983         val64 = readq(&bar0->xmsi_data);
5984         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5985                 fail = 1;
5986                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5987         }
5988
5989         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5990         writeq(val64, &bar0->xmsi_data);
5991         val64 = readq(&bar0->xmsi_data);
5992         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5993                 fail = 1;
5994                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5995         }
5996
5997         *data = fail;
5998         return fail;
5999 }
6000
6001 /**
6002  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
6003  * @sp : private member of the device structure, which is a pointer to the
6004  * s2io_nic structure.
6005  * @data:variable that returns the result of each of the test conducted by
6006  * the driver.
6007  * Description:
6008  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
6009  * register.
6010  * Return value:
6011  * 0 on success.
6012  */
6013
6014 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
6015 {
6016         int fail = 0;
6017         u64 ret_data, org_4F0, org_7F0;
6018         u8 saved_4F0 = 0, saved_7F0 = 0;
6019         struct net_device *dev = sp->dev;
6020
6021         /* Test Write Error at offset 0 */
6022         /* Note that SPI interface allows write access to all areas
6023          * of EEPROM. Hence doing all negative testing only for Xframe I.
6024          */
6025         if (sp->device_type == XFRAME_I_DEVICE)
6026                 if (!write_eeprom(sp, 0, 0, 3))
6027                         fail = 1;
6028
6029         /* Save current values at offsets 0x4F0 and 0x7F0 */
6030         if (!read_eeprom(sp, 0x4F0, &org_4F0))
6031                 saved_4F0 = 1;
6032         if (!read_eeprom(sp, 0x7F0, &org_7F0))
6033                 saved_7F0 = 1;
6034
6035         /* Test Write at offset 4f0 */
6036         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6037                 fail = 1;
6038         if (read_eeprom(sp, 0x4F0, &ret_data))
6039                 fail = 1;
6040
6041         if (ret_data != 0x012345) {
6042                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6043                         "Data written %llx Data read %llx\n",
6044                         dev->name, (unsigned long long)0x12345,
6045                         (unsigned long long)ret_data);
6046                 fail = 1;
6047         }
6048
6049         /* Reset the EEPROM data go FFFF */
6050         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6051
6052         /* Test Write Request Error at offset 0x7c */
6053         if (sp->device_type == XFRAME_I_DEVICE)
6054                 if (!write_eeprom(sp, 0x07C, 0, 3))
6055                         fail = 1;
6056
6057         /* Test Write Request at offset 0x7f0 */
6058         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6059                 fail = 1;
6060         if (read_eeprom(sp, 0x7F0, &ret_data))
6061                 fail = 1;
6062
6063         if (ret_data != 0x012345) {
6064                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6065                         "Data written %llx Data read %llx\n",
6066                         dev->name, (unsigned long long)0x12345,
6067                         (unsigned long long)ret_data);
6068                 fail = 1;
6069         }
6070
6071         /* Reset the EEPROM data go FFFF */
6072         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6073
6074         if (sp->device_type == XFRAME_I_DEVICE) {
6075                 /* Test Write Error at offset 0x80 */
6076                 if (!write_eeprom(sp, 0x080, 0, 3))
6077                         fail = 1;
6078
6079                 /* Test Write Error at offset 0xfc */
6080                 if (!write_eeprom(sp, 0x0FC, 0, 3))
6081                         fail = 1;
6082
6083                 /* Test Write Error at offset 0x100 */
6084                 if (!write_eeprom(sp, 0x100, 0, 3))
6085                         fail = 1;
6086
6087                 /* Test Write Error at offset 4ec */
6088                 if (!write_eeprom(sp, 0x4EC, 0, 3))
6089                         fail = 1;
6090         }
6091
6092         /* Restore values at offsets 0x4F0 and 0x7F0 */
6093         if (saved_4F0)
6094                 write_eeprom(sp, 0x4F0, org_4F0, 3);
6095         if (saved_7F0)
6096                 write_eeprom(sp, 0x7F0, org_7F0, 3);
6097
6098         *data = fail;
6099         return fail;
6100 }
6101
6102 /**
6103  * s2io_bist_test - invokes the MemBist test of the card .
6104  * @sp : private member of the device structure, which is a pointer to the
6105  * s2io_nic structure.
6106  * @data:variable that returns the result of each of the test conducted by
6107  * the driver.
6108  * Description:
6109  * This invokes the MemBist test of the card. We give around
6110  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6112  * Return value:
6113  * 0 on success and -1 on failure.
6114  */
6115
6116 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6117 {
6118         u8 bist = 0;
6119         int cnt = 0, ret = -1;
6120
6121         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6122         bist |= PCI_BIST_START;
6123         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6124
6125         while (cnt < 20) {
6126                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6127                 if (!(bist & PCI_BIST_START)) {
6128                         *data = (bist & PCI_BIST_CODE_MASK);
6129                         ret = 0;
6130                         break;
6131                 }
6132                 msleep(100);
6133                 cnt++;
6134         }
6135
6136         return ret;
6137 }
6138
6139 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6142  * s2io_nic structure.
6143  * @data: variable that returns the result of each of the test conducted by
6144  * the driver.
6145  * Description:
6146  * The function verifies the link state of the NIC and updates the input
6147  * argument 'data' appropriately.
6148  * Return value:
6149  * 0 on success.
6150  */
6151
6152 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6153 {
6154         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6155         u64 val64;
6156
6157         val64 = readq(&bar0->adapter_status);
6158         if(!(LINK_IS_UP(val64)))
6159                 *data = 1;
6160         else
6161                 *data = 0;
6162
6163         return *data;
6164 }
6165
6166 /**
6167  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6168  * @sp - private member of the device structure, which is a pointer to the
6169  * s2io_nic structure.
6170  * @data - variable that returns the result of each of the test
6171  * conducted by the driver.
6172  * Description:
6173  *  This is one of the offline test that tests the read and write
6174  *  access to the RldRam chip on the NIC.
6175  * Return value:
6176  *  0 on success.
6177  */
6178
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC reporting while we deliberately exercise the RLDRAM. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode.  The
	 * SPECIAL_REG_WRITE LF/UF variants flush the lower/upper halves
	 * in the order this hardware requires - keep the sequence as-is.
	 */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* MRS enable must be set with a second write after queue-size. */
	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: pass 0 writes the base patterns, pass 1 writes the
	 * patterns with their upper 48 bits inverted.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transfer. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to ~1s) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out: bail without setting test_fail (reported as pass
		 * of whatever completed so far).
		 */
		if (cnt == 5)
			break;

		/* Kick off the read-back phase and poll (up to ~2.5s). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware compares read-back data and reports PASS/FAIL. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6263
6264 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6266  *  @sp : private member of the device structure, which is a pointer to the
6267  *  s2io_nic structure.
6268  *  @ethtest : pointer to a ethtool command specific structure that will be
6269  *  returned to the user.
6270  *  @data : variable that returns the result of each of the test
6271  * conducted by the driver.
6272  * Description:
6273  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6274  *  the health of the card.
6275  * Return value:
6276  *  void
6277  */
6278
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	/* Remember whether the interface was up so we can restore it
	 * after the disruptive offline tests.
	 */
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[0]: register test, data[3]: RLDRAM test,
		 * data[1]: EEPROM test, data[4]: BIST, data[2]: link
		 * (link test only runs in the online branch).
		 */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests that leave the adapter in an
		 * intermediate state.
		 */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are overwritten
			 * unconditionally below (data[2] by the link test,
			 * the rest by the 0-assignments), so only the log
			 * message survives when the device is down -
			 * confirm whether an early return was intended.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6333
6334 static void s2io_get_ethtool_stats(struct net_device *dev,
6335                                    struct ethtool_stats *estats,
6336                                    u64 * tmp_stats)
6337 {
6338         int i = 0, k;
6339         struct s2io_nic *sp = netdev_priv(dev);
6340         struct stat_block *stat_info = sp->mac_control.stats_info;
6341
6342         s2io_updt_stats(sp);
6343         tmp_stats[i++] =
6344                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
6345                 le32_to_cpu(stat_info->tmac_frms);
6346         tmp_stats[i++] =
6347                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6348                 le32_to_cpu(stat_info->tmac_data_octets);
6349         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6350         tmp_stats[i++] =
6351                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6352                 le32_to_cpu(stat_info->tmac_mcst_frms);
6353         tmp_stats[i++] =
6354                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6355                 le32_to_cpu(stat_info->tmac_bcst_frms);
6356         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6357         tmp_stats[i++] =
6358                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6359                 le32_to_cpu(stat_info->tmac_ttl_octets);
6360         tmp_stats[i++] =
6361                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6362                 le32_to_cpu(stat_info->tmac_ucst_frms);
6363         tmp_stats[i++] =
6364                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6365                 le32_to_cpu(stat_info->tmac_nucst_frms);
6366         tmp_stats[i++] =
6367                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6368                 le32_to_cpu(stat_info->tmac_any_err_frms);
6369         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6370         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6371         tmp_stats[i++] =
6372                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6373                 le32_to_cpu(stat_info->tmac_vld_ip);
6374         tmp_stats[i++] =
6375                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6376                 le32_to_cpu(stat_info->tmac_drop_ip);
6377         tmp_stats[i++] =
6378                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6379                 le32_to_cpu(stat_info->tmac_icmp);
6380         tmp_stats[i++] =
6381                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6382                 le32_to_cpu(stat_info->tmac_rst_tcp);
6383         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6384         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6385                 le32_to_cpu(stat_info->tmac_udp);
6386         tmp_stats[i++] =
6387                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6388                 le32_to_cpu(stat_info->rmac_vld_frms);
6389         tmp_stats[i++] =
6390                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6391                 le32_to_cpu(stat_info->rmac_data_octets);
6392         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6393         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6394         tmp_stats[i++] =
6395                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6396                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6397         tmp_stats[i++] =
6398                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6399                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6400         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6401         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6402         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6403         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6404         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6405         tmp_stats[i++] =
6406                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6407                 le32_to_cpu(stat_info->rmac_ttl_octets);
6408         tmp_stats[i++] =
6409                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6410                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6411         tmp_stats[i++] =
6412                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6413                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6414         tmp_stats[i++] =
6415                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6416                 le32_to_cpu(stat_info->rmac_discarded_frms);
6417         tmp_stats[i++] =
6418                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6419                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6420         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6421         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6422         tmp_stats[i++] =
6423                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6424                 le32_to_cpu(stat_info->rmac_usized_frms);
6425         tmp_stats[i++] =
6426                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6427                 le32_to_cpu(stat_info->rmac_osized_frms);
6428         tmp_stats[i++] =
6429                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6430                 le32_to_cpu(stat_info->rmac_frag_frms);
6431         tmp_stats[i++] =
6432                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6433                 le32_to_cpu(stat_info->rmac_jabber_frms);
6434         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6435         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6436         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6437         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6438         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6439         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6440         tmp_stats[i++] =
6441                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6442                 le32_to_cpu(stat_info->rmac_ip);
6443         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6444         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6445         tmp_stats[i++] =
6446                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6447                 le32_to_cpu(stat_info->rmac_drop_ip);
6448         tmp_stats[i++] =
6449                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6450                 le32_to_cpu(stat_info->rmac_icmp);
6451         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6452         tmp_stats[i++] =
6453                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6454                 le32_to_cpu(stat_info->rmac_udp);
6455         tmp_stats[i++] =
6456                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6457                 le32_to_cpu(stat_info->rmac_err_drp_udp);
6458         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6459         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6460         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6461         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6462         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6463         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6464         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6465         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6466         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6467         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6468         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6469         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6470         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6471         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6472         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6473         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6474         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6475         tmp_stats[i++] =
6476                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6477                 le32_to_cpu(stat_info->rmac_pause_cnt);
6478         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6479         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6480         tmp_stats[i++] =
6481                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6482                 le32_to_cpu(stat_info->rmac_accepted_ip);
6483         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6484         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6485         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6486         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6487         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6488         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6489         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6490         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6491         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6492         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6493         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6494         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6495         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6496         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6497         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6498         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6499         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6500         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6501         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6502
6503         /* Enhanced statistics exist only for Hercules */
6504         if(sp->device_type == XFRAME_II_DEVICE) {
6505                 tmp_stats[i++] =
6506                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6507                 tmp_stats[i++] =
6508                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6509                 tmp_stats[i++] =
6510                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6511                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6512                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6513                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6514                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6515                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6516                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6517                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6518                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6519                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6520                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6521                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6522                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6523                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6524         }
6525
6526         tmp_stats[i++] = 0;
6527         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6528         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6529         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6530         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6531         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6532         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6533         for (k = 0; k < MAX_RX_RINGS; k++)
6534                 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6535         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6536         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6537         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6538         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6539         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6540         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6541         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6542         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6543         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6544         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6545         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6546         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6547         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6548         tmp_stats[i++] = stat_info->sw_stat.sending_both;
6549         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6550         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6551         if (stat_info->sw_stat.num_aggregations) {
6552                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6553                 int count = 0;
6554                 /*
6555                  * Since 64-bit divide does not work on all platforms,
6556                  * do repeated subtraction.
6557                  */
6558                 while (tmp >= stat_info->sw_stat.num_aggregations) {
6559                         tmp -= stat_info->sw_stat.num_aggregations;
6560                         count++;
6561                 }
6562                 tmp_stats[i++] = count;
6563         }
6564         else
6565                 tmp_stats[i++] = 0;
6566         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6567         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6568         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6569         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6570         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6571         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6572         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6573         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6574         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6575
6576         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6577         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6578         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6579         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6580         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6581
6582         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6583         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6584         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6585         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6586         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6587         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6588         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6589         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6590         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6591         tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6592         tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6593         tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6594         tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6595         tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6596         tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6597         tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6598         tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6599         tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6600         tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6601         tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6602         tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6603         tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6604         tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6605         tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6606         tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6607         tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6608 }
6609
6610 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6611 {
6612         return (XENA_REG_SPACE);
6613 }
6614
6615
6616 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6617 {
6618         struct s2io_nic *sp = netdev_priv(dev);
6619
6620         return (sp->rx_csum);
6621 }
6622
6623 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6624 {
6625         struct s2io_nic *sp = netdev_priv(dev);
6626
6627         if (data)
6628                 sp->rx_csum = 1;
6629         else
6630                 sp->rx_csum = 0;
6631
6632         return 0;
6633 }
6634
6635 static int s2io_get_eeprom_len(struct net_device *dev)
6636 {
6637         return (XENA_EEPROM_SPACE);
6638 }
6639
6640 static int s2io_get_sset_count(struct net_device *dev, int sset)
6641 {
6642         struct s2io_nic *sp = netdev_priv(dev);
6643
6644         switch (sset) {
6645         case ETH_SS_TEST:
6646                 return S2IO_TEST_LEN;
6647         case ETH_SS_STATS:
6648                 switch(sp->device_type) {
6649                 case XFRAME_I_DEVICE:
6650                         return XFRAME_I_STAT_LEN;
6651                 case XFRAME_II_DEVICE:
6652                         return XFRAME_II_STAT_LEN;
6653                 default:
6654                         return 0;
6655                 }
6656         default:
6657                 return -EOPNOTSUPP;
6658         }
6659 }
6660
6661 static void s2io_ethtool_get_strings(struct net_device *dev,
6662                                      u32 stringset, u8 * data)
6663 {
6664         int stat_size = 0;
6665         struct s2io_nic *sp = netdev_priv(dev);
6666
6667         switch (stringset) {
6668         case ETH_SS_TEST:
6669                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6670                 break;
6671         case ETH_SS_STATS:
6672                 stat_size = sizeof(ethtool_xena_stats_keys);
6673                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6674                 if(sp->device_type == XFRAME_II_DEVICE) {
6675                         memcpy(data + stat_size,
6676                                 &ethtool_enhanced_stats_keys,
6677                                 sizeof(ethtool_enhanced_stats_keys));
6678                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6679                 }
6680
6681                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6682                         sizeof(ethtool_driver_stats_keys));
6683         }
6684 }
6685
6686 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6687 {
6688         if (data)
6689                 dev->features |= NETIF_F_IP_CSUM;
6690         else
6691                 dev->features &= ~NETIF_F_IP_CSUM;
6692
6693         return 0;
6694 }
6695
6696 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6697 {
6698         return (dev->features & NETIF_F_TSO) != 0;
6699 }
6700 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6701 {
6702         if (data)
6703                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6704         else
6705                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6706
6707         return 0;
6708 }
6709
/*
 * ethtool entry points for this driver.  Generic ethtool_op_* helpers
 * are used where no hardware-specific handling is needed; everything
 * else is implemented by the s2io_* functions above.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6736
6737 /**
6738  *  s2io_ioctl - Entry point for the Ioctl
6739  *  @dev :  Device pointer.
6740  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6741  *  a proprietary structure used to pass information to the driver.
6742  *  @cmd :  This is used to distinguish between the different commands that
6743  *  can be passed to the IOCTL functions.
6744  *  Description:
6745  *  Currently there are no special functionality supported in IOCTL, hence
6746  *  function always return EOPNOTSUPPORTED
6747  */
6748
6749 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6750 {
6751         return -EOPNOTSUPP;
6752 }
6753
6754 /**
6755  *  s2io_change_mtu - entry point to change MTU size for the device.
6756  *   @dev : device pointer.
6757  *   @new_mtu : the new MTU size for the device.
6758  *   Description: A driver entry point to change MTU size for the device.
6759  *   Before changing the MTU the device must be stopped.
6760  *  Return value:
6761  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6762  *   file on failure.
6763  */
6764
6765 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6766 {
6767         struct s2io_nic *sp = netdev_priv(dev);
6768         int ret = 0;
6769
6770         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6771                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6772                           dev->name);
6773                 return -EPERM;
6774         }
6775
6776         dev->mtu = new_mtu;
6777         if (netif_running(dev)) {
6778                 s2io_stop_all_tx_queue(sp);
6779                 s2io_card_down(sp);
6780                 ret = s2io_card_up(sp);
6781                 if (ret) {
6782                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6783                                   __func__);
6784                         return ret;
6785                 }
6786                 s2io_wake_all_tx_queue(sp);
6787         } else { /* Device is down */
6788                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6789                 u64 val64 = new_mtu;
6790
6791                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6792         }
6793
6794         return ret;
6795 }
6796
6797 /**
6798  * s2io_set_link - Set the LInk status
6799  * @data: long pointer to device private structue
6800  * Description: Sets the link status for the adapter
6801  */
6802
6803 static void s2io_set_link(struct work_struct *work)
6804 {
6805         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6806         struct net_device *dev = nic->dev;
6807         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6808         register u64 val64;
6809         u16 subid;
6810
6811         rtnl_lock();
6812
6813         if (!netif_running(dev))
6814                 goto out_unlock;
6815
6816         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6817                 /* The card is being reset, no point doing anything */
6818                 goto out_unlock;
6819         }
6820
6821         subid = nic->pdev->subsystem_device;
6822         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6823                 /*
6824                  * Allow a small delay for the NICs self initiated
6825                  * cleanup to complete.
6826                  */
6827                 msleep(100);
6828         }
6829
6830         val64 = readq(&bar0->adapter_status);
6831         if (LINK_IS_UP(val64)) {
6832                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6833                         if (verify_xena_quiescence(nic)) {
6834                                 val64 = readq(&bar0->adapter_control);
6835                                 val64 |= ADAPTER_CNTL_EN;
6836                                 writeq(val64, &bar0->adapter_control);
6837                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6838                                         nic->device_type, subid)) {
6839                                         val64 = readq(&bar0->gpio_control);
6840                                         val64 |= GPIO_CTRL_GPIO_0;
6841                                         writeq(val64, &bar0->gpio_control);
6842                                         val64 = readq(&bar0->gpio_control);
6843                                 } else {
6844                                         val64 |= ADAPTER_LED_ON;
6845                                         writeq(val64, &bar0->adapter_control);
6846                                 }
6847                                 nic->device_enabled_once = true;
6848                         } else {
6849                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6850                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6851                                 s2io_stop_all_tx_queue(nic);
6852                         }
6853                 }
6854                 val64 = readq(&bar0->adapter_control);
6855                 val64 |= ADAPTER_LED_ON;
6856                 writeq(val64, &bar0->adapter_control);
6857                 s2io_link(nic, LINK_UP);
6858         } else {
6859                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6860                                                       subid)) {
6861                         val64 = readq(&bar0->gpio_control);
6862                         val64 &= ~GPIO_CTRL_GPIO_0;
6863                         writeq(val64, &bar0->gpio_control);
6864                         val64 = readq(&bar0->gpio_control);
6865                 }
6866                 /* turn off LED */
6867                 val64 = readq(&bar0->adapter_control);
6868                 val64 = val64 &(~ADAPTER_LED_ON);
6869                 writeq(val64, &bar0->adapter_control);
6870                 s2io_link(nic, LINK_DOWN);
6871         }
6872         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6873
6874 out_unlock:
6875         rtnl_unlock();
6876 }
6877
6878 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6879                                 struct buffAdd *ba,
6880                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6881                                 u64 *temp2, int size)
6882 {
6883         struct net_device *dev = sp->dev;
6884         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6885
6886         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6887                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6888                 /* allocate skb */
6889                 if (*skb) {
6890                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6891                         /*
6892                          * As Rx frame are not going to be processed,
6893                          * using same mapped address for the Rxd
6894                          * buffer pointer
6895                          */
6896                         rxdp1->Buffer0_ptr = *temp0;
6897                 } else {
6898                         *skb = dev_alloc_skb(size);
6899                         if (!(*skb)) {
6900                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6901                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6902                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6903                                 sp->mac_control.stats_info->sw_stat. \
6904                                         mem_alloc_fail_cnt++;
6905                                 return -ENOMEM ;
6906                         }
6907                         sp->mac_control.stats_info->sw_stat.mem_allocated
6908                                 += (*skb)->truesize;
6909                         /* storing the mapped addr in a temp variable
6910                          * such it will be used for next rxd whose
6911                          * Host Control is NULL
6912                          */
6913                         rxdp1->Buffer0_ptr = *temp0 =
6914                                 pci_map_single( sp->pdev, (*skb)->data,
6915                                         size - NET_IP_ALIGN,
6916                                         PCI_DMA_FROMDEVICE);
6917                         if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6918                                 goto memalloc_failed;
6919                         rxdp->Host_Control = (unsigned long) (*skb);
6920                 }
6921         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6922                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6923                 /* Two buffer Mode */
6924                 if (*skb) {
6925                         rxdp3->Buffer2_ptr = *temp2;
6926                         rxdp3->Buffer0_ptr = *temp0;
6927                         rxdp3->Buffer1_ptr = *temp1;
6928                 } else {
6929                         *skb = dev_alloc_skb(size);
6930                         if (!(*skb)) {
6931                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6932                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6933                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6934                                 sp->mac_control.stats_info->sw_stat. \
6935                                         mem_alloc_fail_cnt++;
6936                                 return -ENOMEM;
6937                         }
6938                         sp->mac_control.stats_info->sw_stat.mem_allocated
6939                                 += (*skb)->truesize;
6940                         rxdp3->Buffer2_ptr = *temp2 =
6941                                 pci_map_single(sp->pdev, (*skb)->data,
6942                                                dev->mtu + 4,
6943                                                PCI_DMA_FROMDEVICE);
6944                         if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6945                                 goto memalloc_failed;
6946                         rxdp3->Buffer0_ptr = *temp0 =
6947                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6948                                                 PCI_DMA_FROMDEVICE);
6949                         if (pci_dma_mapping_error(sp->pdev,
6950                                                 rxdp3->Buffer0_ptr)) {
6951                                 pci_unmap_single (sp->pdev,
6952                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6953                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6954                                 goto memalloc_failed;
6955                         }
6956                         rxdp->Host_Control = (unsigned long) (*skb);
6957
6958                         /* Buffer-1 will be dummy buffer not used */
6959                         rxdp3->Buffer1_ptr = *temp1 =
6960                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6961                                                 PCI_DMA_FROMDEVICE);
6962                         if (pci_dma_mapping_error(sp->pdev,
6963                                                 rxdp3->Buffer1_ptr)) {
6964                                 pci_unmap_single (sp->pdev,
6965                                         (dma_addr_t)rxdp3->Buffer0_ptr,
6966                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
6967                                 pci_unmap_single (sp->pdev,
6968                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6969                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6970                                 goto memalloc_failed;
6971                         }
6972                 }
6973         }
6974         return 0;
6975         memalloc_failed:
6976                 stats->pci_map_fail_cnt++;
6977                 stats->mem_freed += (*skb)->truesize;
6978                 dev_kfree_skb(*skb);
6979                 return -ENOMEM;
6980 }
6981
6982 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6983                                 int size)
6984 {
6985         struct net_device *dev = sp->dev;
6986         if (sp->rxd_mode == RXD_MODE_1) {
6987                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6988         } else if (sp->rxd_mode == RXD_MODE_3B) {
6989                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6990                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6991                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6992         }
6993 }
6994
6995 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6996 {
6997         int i, j, k, blk_cnt = 0, size;
6998         struct mac_info * mac_control = &sp->mac_control;
6999         struct config_param *config = &sp->config;
7000         struct net_device *dev = sp->dev;
7001         struct RxD_t *rxdp = NULL;
7002         struct sk_buff *skb = NULL;
7003         struct buffAdd *ba = NULL;
7004         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7005
7006         /* Calculate the size based on ring mode */
7007         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7008                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7009         if (sp->rxd_mode == RXD_MODE_1)
7010                 size += NET_IP_ALIGN;
7011         else if (sp->rxd_mode == RXD_MODE_3B)
7012                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
7013
7014         for (i = 0; i < config->rx_ring_num; i++) {
7015                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7016                 struct ring_info *ring = &mac_control->rings[i];
7017
7018                 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] +1);
7019
7020                 for (j = 0; j < blk_cnt; j++) {
7021                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7022                                 rxdp = ring-> rx_blocks[j].rxds[k].virt_addr;
7023                                 if(sp->rxd_mode == RXD_MODE_3B)
7024                                         ba = &ring->ba[j][k];
7025                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
7026                                                        &skb,(u64 *)&temp0_64,
7027                                                        (u64 *)&temp1_64,
7028                                                        (u64 *)&temp2_64,
7029                                                         size) == -ENOMEM) {
7030                                         return 0;
7031                                 }
7032
7033                                 set_rxd_buffer_size(sp, rxdp, size);
7034                                 wmb();
7035                                 /* flip the Ownership bit to Hardware */
7036                                 rxdp->Control_1 |= RXD_OWN_XENA;
7037                         }
7038                 }
7039         }
7040         return 0;
7041
7042 }
7043
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 *
 * Tries MSI-X first (one vector per ring plus an alarm vector) when
 * configured; on any enable or registration failure it falls back to
 * legacy INTA.  Returns 0 on success, -1 if even INTA registration
 * fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: drop back to legacy interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo every vector registered so
					 * far and fall back to INTA. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* NOTE(review): msix_rx_cnt counts entries with valid
			 * addr/data; the pre-decrement presumably excludes the
			 * alarm vector from the reported Rx count — confirm. */
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
						" through alarm vector\n");
		}
	}
	/* Either configured for INTA, or MSI-X fell back to it above. */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7131 static void s2io_rem_isr(struct s2io_nic * sp)
7132 {
7133         if (sp->config.intr_type == MSI_X)
7134                 remove_msix_isr(sp);
7135         else
7136                 remove_inta_isr(sp);
7137 }
7138
/*
 * do_s2io_card_down - common card teardown path.
 * @sp:    device private structure
 * @do_io: non-zero when hardware register access is allowed; zero skips
 *         stop_nic/reset and the quiescence polling (e.g. when the device
 *         is already inaccessible).
 *
 * Stops the alarm timer, waits out a concurrently running link task,
 * disables NAPI, unregisters interrupts, marks the link down, waits for
 * the hardware to go quiescent (when @do_io), resets it, and frees all
 * Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi: per-ring instances under MSI-X, the single
	 * device-wide instance otherwise. */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
			}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the rxds in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Poll up to 10 times (~500 ms) before giving up. */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7215
/* Bring the card down with hardware I/O enabled (stop, poll, reset). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7220
/*
 * s2io_card_up - brings the adapter to an operational state.
 * @sp: device private structure.
 *
 * Initializes the hardware registers, replenishes the receive rings,
 * enables NAPI, restores the receive mode, starts the NIC, and installs
 * the interrupt handler and alarm timer.  Returns 0 on success or a
 * negative errno on failure; on failure everything acquired here is
 * released again before returning.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* NOTE(review): reset is skipped on -EIO - presumably
		 * register I/O itself is broken then; confirm.
		 */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			/* releases buffers already given to earlier rings too */
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per rx ring under MSI-X */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* NOTE(review): under MSI-X some vectors may have been
		 * requested before the failure; s2io_rem_isr presumably
		 * undoes those - confirm.
		 */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Run the alarm handler every half second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* NOTE(review): RX_TRAFFIC_INTR intentionally omitted for
		 * MSI-X - presumably rx is handled per vector; confirm.
		 */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7325
7326 /**
7327  * s2io_restart_nic - Resets the NIC.
7328  * @data : long pointer to the device private structure
7329  * Description:
7330  * This function is scheduled to be run by the s2io_tx_watchdog
7331  * function after 0.5 secs to reset the NIC. The idea is to reduce
7332  * the run time of the watch dog routine which is run holding a
7333  * spin lock.
7334  */
7335
static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	/* Serialize against dev open/close which also run under rtnl. */
	rtnl_lock();

	/* The interface may have been closed before this work item ran. */
	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
out_unlock:
	rtnl_unlock();
}
7357
7358 /**
7359  *  s2io_tx_watchdog - Watchdog for transmit side.
7360  *  @dev : Pointer to net device structure
7361  *  Description:
7362  *  This function is triggered if the Tx Queue is stopped
7363  *  for a pre-defined amount of time when the Interface is still up.
7364  *  If the Interface is jammed in such a situation, the hardware is
7365  *  reset (by s2io_close) and restarted again (by s2io_open) to
7366  *  overcome any problem that might have been caused in the hardware.
7367  *  Return value:
7368  *  void
7369  */
7370
7371 static void s2io_tx_watchdog(struct net_device *dev)
7372 {
7373         struct s2io_nic *sp = netdev_priv(dev);
7374
7375         if (netif_carrier_ok(dev)) {
7376                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7377                 schedule_work(&sp->rst_timer_task);
7378                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7379         }
7380 }
7381
7382 /**
7383  *   rx_osm_handler - To perform some OS related operations on SKB.
7384  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7385  *   @skb : the socket buffer pointer.
7386  *   @len : length of the packet
7387  *   @cksum : FCS checksum of the frame.
7388  *   @ring_no : the ring from which this RxD was extracted.
7389  *   Description:
 7390  *   This function is called by the Rx interrupt service routine to perform
7391  *   some OS related operations on the SKB before passing it to the upper
7392  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7393  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7394  *   to the upper layer. If the checksum is wrong, it increments the Rx
7395  *   packet error count, frees the SKB and returns error.
7396  *   Return value:
7397  *   SUCCESS on success and -1 on failure.
7398  */
7399 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7400 {
7401         struct s2io_nic *sp = ring_data->nic;
7402         struct net_device *dev = (struct net_device *) ring_data->dev;
7403         struct sk_buff *skb = (struct sk_buff *)
7404                 ((unsigned long) rxdp->Host_Control);
7405         int ring_no = ring_data->ring_no;
7406         u16 l3_csum, l4_csum;
7407         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7408         struct lro *uninitialized_var(lro);
7409         u8 err_mask;
7410
7411         skb->dev = dev;
7412
7413         if (err) {
7414                 /* Check for parity error */
7415                 if (err & 0x1) {
7416                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7417                 }
7418                 err_mask = err >> 48;
7419                 switch(err_mask) {
7420                         case 1:
7421                                 sp->mac_control.stats_info->sw_stat.
7422                                 rx_parity_err_cnt++;
7423                         break;
7424
7425                         case 2:
7426                                 sp->mac_control.stats_info->sw_stat.
7427                                 rx_abort_cnt++;
7428                         break;
7429
7430                         case 3:
7431                                 sp->mac_control.stats_info->sw_stat.
7432                                 rx_parity_abort_cnt++;
7433                         break;
7434
7435                         case 4:
7436                                 sp->mac_control.stats_info->sw_stat.
7437                                 rx_rda_fail_cnt++;
7438                         break;
7439
7440                         case 5:
7441                                 sp->mac_control.stats_info->sw_stat.
7442                                 rx_unkn_prot_cnt++;
7443                         break;
7444
7445                         case 6:
7446                                 sp->mac_control.stats_info->sw_stat.
7447                                 rx_fcs_err_cnt++;
7448                         break;
7449
7450                         case 7:
7451                                 sp->mac_control.stats_info->sw_stat.
7452                                 rx_buf_size_err_cnt++;
7453                         break;
7454
7455                         case 8:
7456                                 sp->mac_control.stats_info->sw_stat.
7457                                 rx_rxd_corrupt_cnt++;
7458                         break;
7459
7460                         case 15:
7461                                 sp->mac_control.stats_info->sw_stat.
7462                                 rx_unkn_err_cnt++;
7463                         break;
7464                 }
7465                 /*
7466                 * Drop the packet if bad transfer code. Exception being
7467                 * 0x5, which could be due to unsupported IPv6 extension header.
7468                 * In this case, we let stack handle the packet.
7469                 * Note that in this case, since checksum will be incorrect,
7470                 * stack will validate the same.
7471                 */
7472                 if (err_mask != 0x5) {
7473                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7474                                 dev->name, err_mask);
7475                         dev->stats.rx_crc_errors++;
7476                         sp->mac_control.stats_info->sw_stat.mem_freed
7477                                 += skb->truesize;
7478                         dev_kfree_skb(skb);
7479                         ring_data->rx_bufs_left -= 1;
7480                         rxdp->Host_Control = 0;
7481                         return 0;
7482                 }
7483         }
7484
7485         /* Updating statistics */
7486         ring_data->rx_packets++;
7487         rxdp->Host_Control = 0;
7488         if (sp->rxd_mode == RXD_MODE_1) {
7489                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7490
7491                 ring_data->rx_bytes += len;
7492                 skb_put(skb, len);
7493
7494         } else if (sp->rxd_mode == RXD_MODE_3B) {
7495                 int get_block = ring_data->rx_curr_get_info.block_index;
7496                 int get_off = ring_data->rx_curr_get_info.offset;
7497                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7498                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7499                 unsigned char *buff = skb_push(skb, buf0_len);
7500
7501                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7502                 ring_data->rx_bytes += buf0_len + buf2_len;
7503                 memcpy(buff, ba->ba_0, buf0_len);
7504                 skb_put(skb, buf2_len);
7505         }
7506
7507         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7508             (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7509             (sp->rx_csum)) {
7510                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7511                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7512                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7513                         /*
7514                          * NIC verifies if the Checksum of the received
7515                          * frame is Ok or not and accordingly returns
7516                          * a flag in the RxD.
7517                          */
7518                         skb->ip_summed = CHECKSUM_UNNECESSARY;
7519                         if (ring_data->lro) {
7520                                 u32 tcp_len;
7521                                 u8 *tcp;
7522                                 int ret = 0;
7523
7524                                 ret = s2io_club_tcp_session(ring_data,
7525                                         skb->data, &tcp, &tcp_len, &lro,
7526                                         rxdp, sp);
7527                                 switch (ret) {
7528                                         case 3: /* Begin anew */
7529                                                 lro->parent = skb;
7530                                                 goto aggregate;
7531                                         case 1: /* Aggregate */
7532                                         {
7533                                                 lro_append_pkt(sp, lro,
7534                                                         skb, tcp_len);
7535                                                 goto aggregate;
7536                                         }
7537                                         case 4: /* Flush session */
7538                                         {
7539                                                 lro_append_pkt(sp, lro,
7540                                                         skb, tcp_len);
7541                                                 queue_rx_frame(lro->parent,
7542                                                         lro->vlan_tag);
7543                                                 clear_lro_session(lro);
7544                                                 sp->mac_control.stats_info->
7545                                                     sw_stat.flush_max_pkts++;
7546                                                 goto aggregate;
7547                                         }
7548                                         case 2: /* Flush both */
7549                                                 lro->parent->data_len =
7550                                                         lro->frags_len;
7551                                                 sp->mac_control.stats_info->
7552                                                      sw_stat.sending_both++;
7553                                                 queue_rx_frame(lro->parent,
7554                                                         lro->vlan_tag);
7555                                                 clear_lro_session(lro);
7556                                                 goto send_up;
7557                                         case 0: /* sessions exceeded */
7558                                         case -1: /* non-TCP or not
7559                                                   * L2 aggregatable
7560                                                   */
7561                                         case 5: /*
7562                                                  * First pkt in session not
7563                                                  * L3/L4 aggregatable
7564                                                  */
7565                                                 break;
7566                                         default:
7567                                                 DBG_PRINT(ERR_DBG,
7568                                                         "%s: Samadhana!!\n",
7569                                                          __func__);
7570                                                 BUG();
7571                                 }
7572                         }
7573                 } else {
7574                         /*
7575                          * Packet with erroneous checksum, let the
7576                          * upper layers deal with it.
7577                          */
7578                         skb->ip_summed = CHECKSUM_NONE;
7579                 }
7580         } else
7581                 skb->ip_summed = CHECKSUM_NONE;
7582
7583         sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7584 send_up:
7585         skb_record_rx_queue(skb, ring_no);
7586         queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7587 aggregate:
7588         sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7589         return SUCCESS;
7590 }
7591
7592 /**
7593  *  s2io_link - stops/starts the Tx queue.
7594  *  @sp : private member of the device structure, which is a pointer to the
7595  *  s2io_nic structure.
 7596  *  @link : indicates whether link is UP/DOWN.
7597  *  Description:
7598  *  This function stops/starts the Tx queue depending on whether the link
7599  *  status of the NIC is is down or up. This is called by the Alarm
7600  *  interrupt handler whenever a link change interrupt comes up.
7601  *  Return value:
7602  *  void.
7603  */
7604
7605 static void s2io_link(struct s2io_nic * sp, int link)
7606 {
7607         struct net_device *dev = (struct net_device *) sp->dev;
7608
7609         if (link != sp->last_link_state) {
7610                 init_tti(sp, link);
7611                 if (link == LINK_DOWN) {
7612                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7613                         s2io_stop_all_tx_queue(sp);
7614                         netif_carrier_off(dev);
7615                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7616                         sp->mac_control.stats_info->sw_stat.link_up_time =
7617                                 jiffies - sp->start_time;
7618                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7619                 } else {
7620                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7621                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7622                         sp->mac_control.stats_info->sw_stat.link_down_time =
7623                                 jiffies - sp->start_time;
7624                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7625                         netif_carrier_on(dev);
7626                         s2io_wake_all_tx_queue(sp);
7627                 }
7628         }
7629         sp->last_link_state = link;
7630         sp->start_time = jiffies;
7631 }
7632
7633 /**
7634  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7635  *  @sp : private member of the device structure, which is a pointer to the
7636  *  s2io_nic structure.
7637  *  Description:
7638  *  This function initializes a few of the PCI and PCI-X configuration registers
7639  *  with recommended values.
7640  *  Return value:
7641  *  void
7642  */
7643
7644 static void s2io_init_pci(struct s2io_nic * sp)
7645 {
7646         u16 pci_cmd = 0, pcix_cmd = 0;
7647
7648         /* Enable Data Parity Error Recovery in PCI-X command register. */
7649         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7650                              &(pcix_cmd));
7651         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7652                               (pcix_cmd | 1));
7653         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7654                              &(pcix_cmd));
7655
7656         /* Set the PErr Response bit in PCI command register. */
7657         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7658         pci_write_config_word(sp->pdev, PCI_COMMAND,
7659                               (pci_cmd | PCI_COMMAND_PARITY));
7660         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7661 }
7662
7663 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7664         u8 *dev_multiq)
7665 {
7666         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7667                 (tx_fifo_num < 1)) {
7668                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7669                         "(%d) not supported\n", tx_fifo_num);
7670
7671                 if (tx_fifo_num < 1)
7672                         tx_fifo_num = 1;
7673                 else
7674                         tx_fifo_num = MAX_TX_FIFOS;
7675
7676                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7677                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7678         }
7679
7680         if (multiq)
7681                 *dev_multiq = multiq;
7682
7683         if (tx_steering_type && (1 == tx_fifo_num)) {
7684                 if (tx_steering_type != TX_DEFAULT_STEERING)
7685                         DBG_PRINT(ERR_DBG,
7686                                 "s2io: Tx steering is not supported with "
7687                                 "one fifo. Disabling Tx steering.\n");
7688                 tx_steering_type = NO_STEERING;
7689         }
7690
7691         if ((tx_steering_type < NO_STEERING) ||
7692                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7693                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7694                          "supported\n");
7695                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7696                 tx_steering_type = NO_STEERING;
7697         }
7698
7699         if (rx_ring_num > MAX_RX_RINGS) {
7700                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7701                          "supported\n");
7702                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7703                         MAX_RX_RINGS);
7704                 rx_ring_num = MAX_RX_RINGS;
7705         }
7706
7707         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7708                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7709                           "Defaulting to INTA\n");
7710                 *dev_intr_type = INTA;
7711         }
7712
7713         if ((*dev_intr_type == MSI_X) &&
7714                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7715                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7716                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7717                                         "Defaulting to INTA\n");
7718                 *dev_intr_type = INTA;
7719         }
7720
7721         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7722                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7723                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7724                 rx_ring_mode = 1;
7725         }
7726         return SUCCESS;
7727 }
7728
7729 /**
7730  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7731  * or Traffic class respectively.
7732  * @nic: device private variable
7733  * Description: The function configures the receive steering to
7734  * desired receive ring.
7735  * Return Value:  SUCCESS on success and
7736  * '-1' on failure (endian settings incorrect).
7737  */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	/* The DS codepoint is a 6-bit field; reject anything wider. */
	if (ds_codepoint > 63)
		return FAILURE;

	/* Program the target ring into the data register first... */
	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	/* ...then issue the write strobe for this codepoint's entry. */
	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	/* Wait until the adapter clears the strobe, i.e. command done. */
	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}
7759
/* net_device callbacks for this driver; installed in s2io_init_nic(). */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open               = s2io_open,
	.ndo_stop               = s2io_close,
	.ndo_get_stats          = s2io_get_stats,
	.ndo_start_xmit         = s2io_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = s2io_set_multicast,
	.ndo_do_ioctl           = s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu         = s2io_change_mtu,
	.ndo_vlan_rx_register   = s2io_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = s2io_vlan_rx_kill_vid,
	.ndo_tx_timeout         = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
7777
7778 /**
7779  *  s2io_init_nic - Initialization of the adapter .
7780  *  @pdev : structure containing the PCI related information of the device.
7781  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7782  *  Description:
 7783  *  The function initializes an adapter identified by the pci_dev structure.
7784  *  All OS related initialization including memory and device structure and
 7785  *  initialization of the device private variable is done. Also the swapper
7786  *  control register is initialized to enable read and write into the I/O
7787  *  registers of the device.
7788  *  Return value:
7789  *  returns 0 on success and negative on failure.
7790  */
7791
7792 static int __devinit
7793 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7794 {
7795         struct s2io_nic *sp;
7796         struct net_device *dev;
7797         int i, j, ret;
7798         int dma_flag = false;
7799         u32 mac_up, mac_down;
7800         u64 val64 = 0, tmp64 = 0;
7801         struct XENA_dev_config __iomem *bar0 = NULL;
7802         u16 subid;
7803         struct mac_info *mac_control;
7804         struct config_param *config;
7805         int mode;
7806         u8 dev_intr_type = intr_type;
7807         u8 dev_multiq = 0;
7808
7809         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7810         if (ret)
7811                 return ret;
7812
7813         if ((ret = pci_enable_device(pdev))) {
7814                 DBG_PRINT(ERR_DBG,
7815                           "s2io_init_nic: pci_enable_device failed\n");
7816                 return ret;
7817         }
7818
7819         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7820                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7821                 dma_flag = true;
7822                 if (pci_set_consistent_dma_mask
7823                     (pdev, DMA_BIT_MASK(64))) {
7824                         DBG_PRINT(ERR_DBG,
7825                                   "Unable to obtain 64bit DMA for \
7826                                         consistent allocations\n");
7827                         pci_disable_device(pdev);
7828                         return -ENOMEM;
7829                 }
7830         } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7831                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7832         } else {
7833                 pci_disable_device(pdev);
7834                 return -ENOMEM;
7835         }
7836         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7837                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7838                 pci_disable_device(pdev);
7839                 return -ENODEV;
7840         }
7841         if (dev_multiq)
7842                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7843         else
7844                 dev = alloc_etherdev(sizeof(struct s2io_nic));
7845         if (dev == NULL) {
7846                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7847                 pci_disable_device(pdev);
7848                 pci_release_regions(pdev);
7849                 return -ENODEV;
7850         }
7851
7852         pci_set_master(pdev);
7853         pci_set_drvdata(pdev, dev);
7854         SET_NETDEV_DEV(dev, &pdev->dev);
7855
7856         /*  Private member variable initialized to s2io NIC structure */
7857         sp = netdev_priv(dev);
7858         memset(sp, 0, sizeof(struct s2io_nic));
7859         sp->dev = dev;
7860         sp->pdev = pdev;
7861         sp->high_dma_flag = dma_flag;
7862         sp->device_enabled_once = false;
7863         if (rx_ring_mode == 1)
7864                 sp->rxd_mode = RXD_MODE_1;
7865         if (rx_ring_mode == 2)
7866                 sp->rxd_mode = RXD_MODE_3B;
7867
7868         sp->config.intr_type = dev_intr_type;
7869
7870         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7871                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7872                 sp->device_type = XFRAME_II_DEVICE;
7873         else
7874                 sp->device_type = XFRAME_I_DEVICE;
7875
7876         sp->lro = lro_enable;
7877
7878         /* Initialize some PCI/PCI-X fields of the NIC. */
7879         s2io_init_pci(sp);
7880
7881         /*
7882          * Setting the device configuration parameters.
7883          * Most of these parameters can be specified by the user during
7884          * module insertion as they are module loadable parameters. If
7885          * these parameters are not not specified during load time, they
7886          * are initialized with default values.
7887          */
7888         mac_control = &sp->mac_control;
7889         config = &sp->config;
7890
7891         config->napi = napi;
7892         config->tx_steering_type = tx_steering_type;
7893
7894         /* Tx side parameters. */
7895         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7896                 config->tx_fifo_num = MAX_TX_FIFOS;
7897         else
7898                 config->tx_fifo_num = tx_fifo_num;
7899
7900         /* Initialize the fifos used for tx steering */
7901         if (config->tx_fifo_num < 5) {
7902                         if (config->tx_fifo_num  == 1)
7903                                 sp->total_tcp_fifos = 1;
7904                         else
7905                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7906                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7907                         sp->total_udp_fifos = 1;
7908                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7909         } else {
7910                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7911                                                 FIFO_OTHER_MAX_NUM);
7912                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7913                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7914                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7915         }
7916
7917         config->multiq = dev_multiq;
7918         for (i = 0; i < config->tx_fifo_num; i++) {
7919                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7920
7921                 tx_cfg->fifo_len = tx_fifo_len[i];
7922                 tx_cfg->fifo_priority = i;
7923         }
7924
7925         /* mapping the QoS priority to the configured fifos */
7926         for (i = 0; i < MAX_TX_FIFOS; i++)
7927                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7928
7929         /* map the hashing selector table to the configured fifos */
7930         for (i = 0; i < config->tx_fifo_num; i++)
7931                 sp->fifo_selector[i] = fifo_selector[i];
7932
7933
7934         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7935         for (i = 0; i < config->tx_fifo_num; i++) {
7936                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7937
7938                 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7939                 if (tx_cfg->fifo_len < 65) {
7940                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7941                         break;
7942                 }
7943         }
7944         /* + 2 because one Txd for skb->data and one Txd for UFO */
7945         config->max_txds = MAX_SKB_FRAGS + 2;
7946
7947         /* Rx side parameters. */
7948         config->rx_ring_num = rx_ring_num;
7949         for (i = 0; i < config->rx_ring_num; i++) {
7950                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7951                 struct ring_info *ring = &mac_control->rings[i];
7952
7953                 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7954                 rx_cfg->ring_priority = i;
7955                 ring->rx_bufs_left = 0;
7956                 ring->rxd_mode = sp->rxd_mode;
7957                 ring->rxd_count = rxd_count[sp->rxd_mode];
7958                 ring->pdev = sp->pdev;
7959                 ring->dev = sp->dev;
7960         }
7961
7962         for (i = 0; i < rx_ring_num; i++) {
7963                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7964
7965                 rx_cfg->ring_org = RING_ORG_BUFF1;
7966                 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7967         }
7968
7969         /*  Setting Mac Control parameters */
7970         mac_control->rmac_pause_time = rmac_pause_time;
7971         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7972         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7973
7974
7975         /*  initialize the shared memory used by the NIC and the host */
7976         if (init_shared_mem(sp)) {
7977                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7978                           dev->name);
7979                 ret = -ENOMEM;
7980                 goto mem_alloc_failed;
7981         }
7982
7983         sp->bar0 = pci_ioremap_bar(pdev, 0);
7984         if (!sp->bar0) {
7985                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7986                           dev->name);
7987                 ret = -ENOMEM;
7988                 goto bar0_remap_failed;
7989         }
7990
7991         sp->bar1 = pci_ioremap_bar(pdev, 2);
7992         if (!sp->bar1) {
7993                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7994                           dev->name);
7995                 ret = -ENOMEM;
7996                 goto bar1_remap_failed;
7997         }
7998
7999         dev->irq = pdev->irq;
8000         dev->base_addr = (unsigned long) sp->bar0;
8001
8002         /* Initializing the BAR1 address as the start of the FIFO pointer. */
8003         for (j = 0; j < MAX_TX_FIFOS; j++) {
8004                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
8005                     (sp->bar1 + (j * 0x00020000));
8006         }
8007
8008         /*  Driver entry points */
8009         dev->netdev_ops = &s2io_netdev_ops;
8010         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8011         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8012
8013         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8014         if (sp->high_dma_flag == true)
8015                 dev->features |= NETIF_F_HIGHDMA;
8016         dev->features |= NETIF_F_TSO;
8017         dev->features |= NETIF_F_TSO6;
8018         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
8019                 dev->features |= NETIF_F_UFO;
8020                 dev->features |= NETIF_F_HW_CSUM;
8021         }
8022         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
8023         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8024         INIT_WORK(&sp->set_link_task, s2io_set_link);
8025
8026         pci_save_state(sp->pdev);
8027
8028         /* Setting swapper control on the NIC, for proper reset operation */
8029         if (s2io_set_swapper(sp)) {
8030                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
8031                           dev->name);
8032                 ret = -EAGAIN;
8033                 goto set_swap_failed;
8034         }
8035
8036         /* Verify if the Herc works on the slot its placed into */
8037         if (sp->device_type & XFRAME_II_DEVICE) {
8038                 mode = s2io_verify_pci_mode(sp);
8039                 if (mode < 0) {
8040                         DBG_PRINT(ERR_DBG, "%s: ", __func__);
8041                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8042                         ret = -EBADSLT;
8043                         goto set_swap_failed;
8044                 }
8045         }
8046
8047         if (sp->config.intr_type == MSI_X) {
8048                 sp->num_entries = config->rx_ring_num + 1;
8049                 ret = s2io_enable_msi_x(sp);
8050
8051                 if (!ret) {
8052                         ret = s2io_test_msi(sp);
8053                         /* rollback MSI-X, will re-enable during add_isr() */
8054                         remove_msix_isr(sp);
8055                 }
8056                 if (ret) {
8057
8058                         DBG_PRINT(ERR_DBG,
8059                           "s2io: MSI-X requested but failed to enable\n");
8060                         sp->config.intr_type = INTA;
8061                 }
8062         }
8063
8064         if (config->intr_type ==  MSI_X) {
8065                 for (i = 0; i < config->rx_ring_num ; i++) {
8066                         struct ring_info *ring = &mac_control->rings[i];
8067
8068                         netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8069                 }
8070         } else {
8071                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8072         }
8073
8074         /* Not needed for Herc */
8075         if (sp->device_type & XFRAME_I_DEVICE) {
8076                 /*
8077                  * Fix for all "FFs" MAC address problems observed on
8078                  * Alpha platforms
8079                  */
8080                 fix_mac_address(sp);
8081                 s2io_reset(sp);
8082         }
8083
8084         /*
8085          * MAC address initialization.
8086          * For now only one mac address will be read and used.
8087          */
8088         bar0 = sp->bar0;
8089         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8090             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8091         writeq(val64, &bar0->rmac_addr_cmd_mem);
8092         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8093                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8094         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8095         mac_down = (u32) tmp64;
8096         mac_up = (u32) (tmp64 >> 32);
8097
8098         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8099         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8100         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8101         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8102         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8103         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8104
8105         /*  Set the factory defined MAC address initially   */
8106         dev->addr_len = ETH_ALEN;
8107         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8108         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8109
8110         /* initialize number of multicast & unicast MAC entries variables */
8111         if (sp->device_type == XFRAME_I_DEVICE) {
8112                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8113                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8114                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8115         } else if (sp->device_type == XFRAME_II_DEVICE) {
8116                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8117                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8118                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8119         }
8120
8121         /* store mac addresses from CAM to s2io_nic structure */
8122         do_s2io_store_unicast_mc(sp);
8123
8124         /* Configure MSIX vector for number of rings configured plus one */
8125         if ((sp->device_type == XFRAME_II_DEVICE) &&
8126                 (config->intr_type == MSI_X))
8127                 sp->num_entries = config->rx_ring_num + 1;
8128
8129          /* Store the values of the MSIX table in the s2io_nic structure */
8130         store_xmsi_data(sp);
8131         /* reset Nic and bring it to known state */
8132         s2io_reset(sp);
8133
8134         /*
8135          * Initialize link state flags
8136          * and the card state parameter
8137          */
8138         sp->state = 0;
8139
8140         /* Initialize spinlocks */
8141         for (i = 0; i < sp->config.tx_fifo_num; i++) {
8142                 struct fifo_info *fifo = &mac_control->fifos[i];
8143
8144                 spin_lock_init(&fifo->tx_lock);
8145         }
8146
8147         /*
8148          * SXE-002: Configure link and activity LED to init state
8149          * on driver load.
8150          */
8151         subid = sp->pdev->subsystem_device;
8152         if ((subid & 0xFF) >= 0x07) {
8153                 val64 = readq(&bar0->gpio_control);
8154                 val64 |= 0x0000800000000000ULL;
8155                 writeq(val64, &bar0->gpio_control);
8156                 val64 = 0x0411040400000000ULL;
8157                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8158                 val64 = readq(&bar0->gpio_control);
8159         }
8160
8161         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8162
8163         if (register_netdev(dev)) {
8164                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8165                 ret = -ENODEV;
8166                 goto register_failed;
8167         }
8168         s2io_vpd_read(sp);
8169         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8170         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8171                   sp->product_name, pdev->revision);
8172         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8173                   s2io_driver_version);
8174         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
8175         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8176         if (sp->device_type & XFRAME_II_DEVICE) {
8177                 mode = s2io_print_pci_mode(sp);
8178                 if (mode < 0) {
8179                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8180                         ret = -EBADSLT;
8181                         unregister_netdev(dev);
8182                         goto set_swap_failed;
8183                 }
8184         }
8185         switch(sp->rxd_mode) {
8186                 case RXD_MODE_1:
8187                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8188                                                 dev->name);
8189                     break;
8190                 case RXD_MODE_3B:
8191                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8192                                                 dev->name);
8193                     break;
8194         }
8195
8196         switch (sp->config.napi) {
8197         case 0:
8198                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8199                 break;
8200         case 1:
8201                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8202                 break;
8203         }
8204
8205         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8206                 sp->config.tx_fifo_num);
8207
8208         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8209                   sp->config.rx_ring_num);
8210
8211         switch(sp->config.intr_type) {
8212                 case INTA:
8213                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8214                     break;
8215                 case MSI_X:
8216                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8217                     break;
8218         }
8219         if (sp->config.multiq) {
8220                 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8221                         struct fifo_info *fifo = &mac_control->fifos[i];
8222
8223                         fifo->multiq = config->multiq;
8224                 }
8225                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8226                         dev->name);
8227         } else
8228                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8229                         dev->name);
8230
8231         switch (sp->config.tx_steering_type) {
8232         case NO_STEERING:
8233                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8234                         " transmit\n", dev->name);
8235                         break;
8236         case TX_PRIORITY_STEERING:
8237                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8238                         " transmit\n", dev->name);
8239                 break;
8240         case TX_DEFAULT_STEERING:
8241                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8242                         " transmit\n", dev->name);
8243         }
8244
8245         if (sp->lro)
8246                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8247                           dev->name);
8248         if (ufo)
8249                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8250                                         " enabled\n", dev->name);
8251         /* Initialize device name */
8252         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8253
8254         if (vlan_tag_strip)
8255                 sp->vlan_strip_flag = 1;
8256         else
8257                 sp->vlan_strip_flag = 0;
8258
8259         /*
8260          * Make Link state as off at this point, when the Link change
8261          * interrupt comes the state will be automatically changed to
8262          * the right state.
8263          */
8264         netif_carrier_off(dev);
8265
8266         return 0;
8267
8268       register_failed:
8269       set_swap_failed:
8270         iounmap(sp->bar1);
8271       bar1_remap_failed:
8272         iounmap(sp->bar0);
8273       bar0_remap_failed:
8274       mem_alloc_failed:
8275         free_shared_mem(sp);
8276         pci_disable_device(pdev);
8277         pci_release_regions(pdev);
8278         pci_set_drvdata(pdev, NULL);
8279         free_netdev(dev);
8280
8281         return ret;
8282 }
8283
8284 /**
8285  * s2io_rem_nic - Free the PCI device
8286  * @pdev: structure containing the PCI related information of the device.
8287  * Description: This function is called by the Pci subsystem to release a
8288  * PCI device and free up all resource held up by the device. This could
8289  * be in response to a Hot plug event or when the driver is to be removed
8290  * from memory.
8291  */
8292
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	/* Nothing to tear down if probe never attached driver data */
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Wait for any queued reset/link work items to finish first */
	flush_scheduled_work();

	sp = netdev_priv(dev);
	/* Detach from the network stack before releasing resources */
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8317
8318 /**
8319  * s2io_starter - Entry point for the driver
8320  * Description: This function is the entry point for the driver. It verifies
8321  * the module loadable parameters and initializes PCI configuration space.
8322  */
8323
static int __init s2io_starter(void)
{
	/* Hand the driver to the PCI core; devices are bound via probe */
	return pci_register_driver(&s2io_driver);
}
8328
8329 /**
8330  * s2io_closer - Cleanup routine for the driver
8331  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8332  */
8333
static __exit void s2io_closer(void)
{
	/* Unregister from the PCI core on module unload */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8339
/* Module entry and exit hooks */
module_init(s2io_starter);
module_exit(s2io_closer);
8342
8343 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8344                 struct tcphdr **tcp, struct RxD_t *rxdp,
8345                 struct s2io_nic *sp)
8346 {
8347         int ip_off;
8348         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8349
8350         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8351                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8352                           __func__);
8353                 return -1;
8354         }
8355
8356         /* Checking for DIX type or DIX type with VLAN */
8357         if ((l2_type == 0)
8358                 || (l2_type == 4)) {
8359                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8360                 /*
8361                  * If vlan stripping is disabled and the frame is VLAN tagged,
8362                  * shift the offset by the VLAN header size bytes.
8363                  */
8364                 if ((!sp->vlan_strip_flag) &&
8365                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8366                         ip_off += HEADER_VLAN_SIZE;
8367         } else {
8368                 /* LLC, SNAP etc are considered non-mergeable */
8369                 return -1;
8370         }
8371
8372         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8373         ip_len = (u8)((*ip)->ihl);
8374         ip_len <<= 2;
8375         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8376
8377         return 0;
8378 }
8379
8380 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8381                                   struct tcphdr *tcp)
8382 {
8383         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8384         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8385            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8386                 return -1;
8387         return 0;
8388 }
8389
8390 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8391 {
8392         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8393 }
8394
8395 static void initiate_new_session(struct lro *lro, u8 *l2h,
8396         struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8397 {
8398         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8399         lro->l2h = l2h;
8400         lro->iph = ip;
8401         lro->tcph = tcp;
8402         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8403         lro->tcp_ack = tcp->ack_seq;
8404         lro->sg_num = 1;
8405         lro->total_len = ntohs(ip->tot_len);
8406         lro->frags_len = 0;
8407         lro->vlan_tag = vlan_tag;
8408         /*
8409          * check if we saw TCP timestamp. Other consistency checks have
8410          * already been done.
8411          */
8412         if (tcp->doff == 8) {
8413                 __be32 *ptr;
8414                 ptr = (__be32 *)(tcp+1);
8415                 lro->saw_ts = 1;
8416                 lro->cur_tsval = ntohl(*(ptr+1));
8417                 lro->cur_tsecr = *(ptr+2);
8418         }
8419         lro->in_use = 1;
8420 }
8421
8422 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8423 {
8424         struct iphdr *ip = lro->iph;
8425         struct tcphdr *tcp = lro->tcph;
8426         __sum16 nchk;
8427         struct stat_block *statinfo = sp->mac_control.stats_info;
8428         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8429
8430         /* Update L3 header */
8431         ip->tot_len = htons(lro->total_len);
8432         ip->check = 0;
8433         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8434         ip->check = nchk;
8435
8436         /* Update L4 header */
8437         tcp->ack_seq = lro->tcp_ack;
8438         tcp->window = lro->window;
8439
8440         /* Update tsecr field if this session has timestamps enabled */
8441         if (lro->saw_ts) {
8442                 __be32 *ptr = (__be32 *)(tcp + 1);
8443                 *(ptr+2) = lro->cur_tsecr;
8444         }
8445
8446         /* Update counters required for calculation of
8447          * average no. of packets aggregated.
8448          */
8449         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8450         statinfo->sw_stat.num_aggregations++;
8451 }
8452
8453 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8454                 struct tcphdr *tcp, u32 l4_pyld)
8455 {
8456         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8457         lro->total_len += l4_pyld;
8458         lro->frags_len += l4_pyld;
8459         lro->tcp_next_seq += l4_pyld;
8460         lro->sg_num++;
8461
8462         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8463         lro->tcp_ack = tcp->ack_seq;
8464         lro->window = tcp->window;
8465
8466         if (lro->saw_ts) {
8467                 __be32 *ptr;
8468                 /* Update tsecr and tsval from this packet */
8469                 ptr = (__be32 *)(tcp+1);
8470                 lro->cur_tsval = ntohl(*(ptr+1));
8471                 lro->cur_tsecr = *(ptr + 2);
8472         }
8473 }
8474
/*
 * verify_l3_l4_lro_capable - check a TCP segment's IP/TCP headers for
 * LRO eligibility.
 * @l_lro: existing session, or NULL when probing for a brand-new session.
 * Returns 0 when the segment may be aggregated, -1 when it must not be.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
				    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	/* doff == 8 means 12 option bytes: expect (NOPs +) one timestamp */
	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8531
/*
 * s2io_club_tcp_session - classify an incoming TCP segment against the
 * ring's LRO sessions and aggregate it or start/flush a session.
 *
 * Return values (as produced below):
 *   <0: frame not LRO-capable at L2 (propagated from check_L2_lro_capable)
 *    0: all LRO session slots in use; *lro set to NULL, send packet up
 *    1: segment aggregated into the existing session *lro
 *    2: out-of-order or unmergeable; *lro headers finalized, flush both
 *    3: new session initiated in *lro
 *    4: aggregated and per-session limit reached; *lro finalized, flush
 *    5: L2-capable but not L3/L4 aggregatable; send packet up unchanged
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Locate the IP/TCP headers; reject non-LRO-capable frames early */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this socket pair */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Sequence gap -> cannot merge; flush the session */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Apply the decision made above */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
								vlan_tag);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__func__);
			break;
	}

	return ret;
}
8630
8631 static void clear_lro_session(struct lro *lro)
8632 {
8633         static u16 lro_struct_size = sizeof(struct lro);
8634
8635         memset(lro, 0, lro_struct_size);
8636 }
8637
8638 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8639 {
8640         struct net_device *dev = skb->dev;
8641         struct s2io_nic *sp = netdev_priv(dev);
8642
8643         skb->protocol = eth_type_trans(skb, dev);
8644         if (sp->vlgrp && vlan_tag
8645                 && (sp->vlan_strip_flag)) {
8646                 /* Queueing the vlan frame to the upper layer */
8647                 if (sp->config.napi)
8648                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8649                 else
8650                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8651         } else {
8652                 if (sp->config.napi)
8653                         netif_receive_skb(skb);
8654                 else
8655                         netif_rx(skb);
8656         }
8657 }
8658
8659 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8660                            struct sk_buff *skb,
8661                            u32 tcp_len)
8662 {
8663         struct sk_buff *first = lro->parent;
8664
8665         first->len += tcp_len;
8666         first->data_len = lro->frags_len;
8667         skb_pull(skb, (skb->len - tcp_len));
8668         if (skb_shinfo(first)->frag_list)
8669                 lro->last_frag->next = skb;
8670         else
8671                 skb_shinfo(first)->frag_list = skb;
8672         first->truesize += skb->truesize;
8673         lro->last_frag = skb;
8674         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8675         return;
8676 }
8677
8678 /**
8679  * s2io_io_error_detected - called when PCI error is detected
8680  * @pdev: Pointer to PCI device
8681  * @state: The current pci connection state
8682  *
8683  * This function is called after a PCI bus error affecting
8684  * this device has been detected.
8685  */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent channel failure: no recovery possible, disconnect */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Ask the PCI core to reset the slot; see s2io_io_slot_reset() */
	return PCI_ERS_RESULT_NEED_RESET;
}
8705
8706 /**
8707  * s2io_io_slot_reset - called after the pci bus has been reset.
8708  * @pdev: Pointer to PCI device
8709  *
8710  * Restart the card from scratch, as if from a cold-boot.
8711  * At this point, the card has exprienced a hard reset,
8712  * followed by fixups by BIOS, and has its config space
8713  * set up identically to what it was at cold boot.
8714  */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	/* Re-enable PCI I/O and memory accesses after the bus reset */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "s2io: "
		       "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Put the adapter back into a known state */
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8731
8732 /**
8733  * s2io_io_resume - called when traffic can start flowing again.
8734  * @pdev: Pointer to PCI device
8735  *
8736  * This callback is called when the error recovery driver tells
8737  * us that its OK to resume normal operation.
8738  */
8739 static void s2io_io_resume(struct pci_dev *pdev)
8740 {
8741         struct net_device *netdev = pci_get_drvdata(pdev);
8742         struct s2io_nic *sp = netdev_priv(netdev);
8743
8744         if (netif_running(netdev)) {
8745                 if (s2io_card_up(sp)) {
8746                         printk(KERN_ERR "s2io: "
8747                                "Can't bring device back up after reset.\n");
8748                         return;
8749                 }
8750
8751                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8752                         s2io_card_down(sp);
8753                         printk(KERN_ERR "s2io: "
8754                                "Can't resetore mac addr after reset.\n");
8755                         return;
8756                 }
8757         }
8758
8759         netif_device_attach(netdev);
8760         netif_tx_wake_all_queues(netdev);
8761 }