s2io.c: convert printks to pr_<level>
[safe/jmp/linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
58
59 #include <linux/module.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/ioport.h>
63 #include <linux/pci.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/kernel.h>
66 #include <linux/netdevice.h>
67 #include <linux/etherdevice.h>
68 #include <linux/mdio.h>
69 #include <linux/skbuff.h>
70 #include <linux/init.h>
71 #include <linux/delay.h>
72 #include <linux/stddef.h>
73 #include <linux/ioctl.h>
74 #include <linux/timex.h>
75 #include <linux/ethtool.h>
76 #include <linux/workqueue.h>
77 #include <linux/if_vlan.h>
78 #include <linux/ip.h>
79 #include <linux/tcp.h>
80 #include <linux/uaccess.h>
81 #include <linux/io.h>
82 #include <net/tcp.h>
83
84 #include <asm/system.h>
85 #include <asm/div64.h>
86 #include <asm/irq.h>
87
88 /* local include */
89 #include "s2io.h"
90 #include "s2io-regs.h"
91
92 #define DRV_VERSION "2.0.26.25"
93
94 /* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per rxd_mode descriptor geometry, indexed by nic->rxd_mode (see the
 * rxd_count[nic->rxd_mode] users below).  rxd_size is presumably the
 * descriptor size in bytes and rxd_count the descriptors per block —
 * TODO(review): confirm against struct RxD1/RxD3 definitions. */
static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};
100
101 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 {
103         int ret;
104
105         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
106                (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107
108         return ret;
109 }
110
111 /*
112  * Cards with following subsystem_id have a link state indication
113  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
114  * macro below identifies these cards given the subsystem_id.
115  */
/* Arguments and the whole expansion are parenthesized so the macros
 * expand safely when given expression arguments. */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)              \
	(((dev_type) == XFRAME_I_DEVICE) ?                              \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||              \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123
/* Returns non-zero when the adapter's CARD_UP state bit is set. */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
128
/* Ethtool related variables and Macros. */
/* Names of the adapter self-tests reported through ethtool; the
 * "(offline)"/"(online)" tags are part of the user-visible strings. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
137
/* Ethtool statistics key names common to Xframe I (Xena) and later
 * adapters; order must match the values reported to ethtool. */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
234
/* Additional statistics keys available only on enhanced (Xframe II)
 * hardware; appended after the Xena keys in the ethtool output. */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
253
/* Software (driver-maintained) statistics keys; the first entry is a
 * section header string shown in the ethtool output. */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
328
/* Counts of the statistics/test key tables above, and the byte sizes
 * of the corresponding ethtool string blocks.  Xframe II reports the
 * Xena + driver stats plus the enhanced set. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
341
/*
 * Initialise @timer to call @handle(@arg) and arm it to fire at
 * jiffies + @exp.  Wrapped in do { } while (0) so the macro expands
 * as a single statement (safe inside an un-braced if/else); arguments
 * are parenthesized for expression safety.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&(timer));			\
		(timer).function = (handle);		\
		(timer).data = (unsigned long)(arg);	\
		mod_timer(&(timer), (jiffies + (exp)));	\
	} while (0)
347
348 /* copy mac addr to def_mac_addr array */
349 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350 {
351         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357 }
358
359 /* Add the vlan */
360 static void s2io_vlan_rx_register(struct net_device *dev,
361                                   struct vlan_group *grp)
362 {
363         int i;
364         struct s2io_nic *nic = netdev_priv(dev);
365         unsigned long flags[MAX_TX_FIFOS];
366         struct mac_info *mac_control = &nic->mac_control;
367         struct config_param *config = &nic->config;
368
369         for (i = 0; i < config->tx_fifo_num; i++) {
370                 struct fifo_info *fifo = &mac_control->fifos[i];
371
372                 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
373         }
374
375         nic->vlgrp = grp;
376
377         for (i = config->tx_fifo_num - 1; i >= 0; i--) {
378                 struct fifo_info *fifo = &mac_control->fifos[i];
379
380                 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
381         }
382 }
383
384 /* Unregister the vlan */
385 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
386 {
387         int i;
388         struct s2io_nic *nic = netdev_priv(dev);
389         unsigned long flags[MAX_TX_FIFOS];
390         struct mac_info *mac_control = &nic->mac_control;
391         struct config_param *config = &nic->config;
392
393         for (i = 0; i < config->tx_fifo_num; i++) {
394                 struct fifo_info *fifo = &mac_control->fifos[i];
395
396                 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
397         }
398
399         if (nic->vlgrp)
400                 vlan_group_set_device(nic->vlgrp, vid, NULL);
401
402         for (i = config->tx_fifo_num - 1; i >= 0; i--) {
403                 struct fifo_info *fifo = &mac_control->fifos[i];
404
405                 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
406         }
407 }
408
409 /*
410  * Constants to be programmed into the Xena's registers, to configure
411  * the XAUI.
412  */
413
/* END_SIGN terminates the register-value tables below. */
#define END_SIGN        0x0
/* DTX register writes for XAUI configuration on Herc (Xframe II)
 * adapters, as address/data pairs consumed until END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
435
/* DTX register writes for XAUI configuration on Xena (Xframe I)
 * adapters; the list is terminated by END_SIGN. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
451
/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines; the sequence is terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
473
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI, UDP fragmentation offload and vlan stripping knobs (see the
 * parameter descriptions in the file header comment). */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo / per-ring array parameters, overridable at load time. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
521
522 /*
523  * S2IO device table.
524  * This table lists all the devices that this driver supports.
525  */
/* PCI IDs claimed by this driver; the all-zero entry terminates the
 * table.  WIN/UNI presumably distinguish board variants — see the
 * PCI_DEVICE_ID_* definitions for details. */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};
537
538 MODULE_DEVICE_TABLE(pci, s2io_tbl);
539
/* PCI error recovery callbacks (AER): invoked by the PCI core on
 * detected errors, slot reset and resume. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
545
/* PCI driver registration: binds the device table above to the
 * probe/remove entry points and the error handlers. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};
553
/* A simplifier macro used both by init and free shared_mem Fns():
 * pages needed to hold @len items at @per_each items per page,
 * rounding up.  Arguments are parenthesized for expression safety. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
556
557 /* netqueue manipulation helper functions */
558 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
559 {
560         if (!sp->config.multiq) {
561                 int i;
562
563                 for (i = 0; i < sp->config.tx_fifo_num; i++)
564                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
565         }
566         netif_tx_stop_all_queues(sp->dev);
567 }
568
569 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
570 {
571         if (!sp->config.multiq)
572                 sp->mac_control.fifos[fifo_no].queue_state =
573                         FIFO_QUEUE_STOP;
574
575         netif_tx_stop_all_queues(sp->dev);
576 }
577
578 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
579 {
580         if (!sp->config.multiq) {
581                 int i;
582
583                 for (i = 0; i < sp->config.tx_fifo_num; i++)
584                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
585         }
586         netif_tx_start_all_queues(sp->dev);
587 }
588
589 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
590 {
591         if (!sp->config.multiq)
592                 sp->mac_control.fifos[fifo_no].queue_state =
593                         FIFO_QUEUE_START;
594
595         netif_tx_start_all_queues(sp->dev);
596 }
597
598 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
599 {
600         if (!sp->config.multiq) {
601                 int i;
602
603                 for (i = 0; i < sp->config.tx_fifo_num; i++)
604                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
605         }
606         netif_tx_wake_all_queues(sp->dev);
607 }
608
609 static inline void s2io_wake_tx_queue(
610         struct fifo_info *fifo, int cnt, u8 multiq)
611 {
612
613         if (multiq) {
614                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
615                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
616         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
617                 if (netif_queue_stopped(fifo->dev)) {
618                         fifo->queue_state = FIFO_QUEUE_START;
619                         netif_wake_queue(fifo->dev);
620                 }
621         }
622 }
623
624 /**
625  * init_shared_mem - Allocation and Initialization of Memory
626  * @nic: Device private variable.
627  * Description: The function allocates all the memory areas shared
628  * between the NIC and the driver. This includes Tx descriptors,
629  * Rx descriptors and the statistics block.
630  */
631
632 static int init_shared_mem(struct s2io_nic *nic)
633 {
634         u32 size;
635         void *tmp_v_addr, *tmp_v_addr_next;
636         dma_addr_t tmp_p_addr, tmp_p_addr_next;
637         struct RxD_block *pre_rxd_blk = NULL;
638         int i, j, blk_cnt;
639         int lst_size, lst_per_page;
640         struct net_device *dev = nic->dev;
641         unsigned long tmp;
642         struct buffAdd *ba;
643
644         struct mac_info *mac_control;
645         struct config_param *config;
646         unsigned long long mem_allocated = 0;
647
648         mac_control = &nic->mac_control;
649         config = &nic->config;
650
651         /* Allocation and initialization of TXDLs in FIFOs */
652         size = 0;
653         for (i = 0; i < config->tx_fifo_num; i++) {
654                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
655
656                 size += tx_cfg->fifo_len;
657         }
658         if (size > MAX_AVAILABLE_TXDS) {
659                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
660                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n",
661                           size);
662                 return -EINVAL;
663         }
664
665         size = 0;
666         for (i = 0; i < config->tx_fifo_num; i++) {
667                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
668
669                 size = tx_cfg->fifo_len;
670                 /*
671                  * Legal values are from 2 to 8192
672                  */
673                 if (size < 2) {
674                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
675                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
676                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
677                                   "are 2 to 8192\n");
678                         return -EINVAL;
679                 }
680         }
681
682         lst_size = (sizeof(struct TxD) * config->max_txds);
683         lst_per_page = PAGE_SIZE / lst_size;
684
685         for (i = 0; i < config->tx_fifo_num; i++) {
686                 struct fifo_info *fifo = &mac_control->fifos[i];
687                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
688                 int fifo_len = tx_cfg->fifo_len;
689                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
690
691                 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
692                 if (!fifo->list_info) {
693                         DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
694                         return -ENOMEM;
695                 }
696                 mem_allocated += list_holder_size;
697         }
698         for (i = 0; i < config->tx_fifo_num; i++) {
699                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
700                                                 lst_per_page);
701                 struct fifo_info *fifo = &mac_control->fifos[i];
702                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
703
704                 fifo->tx_curr_put_info.offset = 0;
705                 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
706                 fifo->tx_curr_get_info.offset = 0;
707                 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
708                 fifo->fifo_no = i;
709                 fifo->nic = nic;
710                 fifo->max_txds = MAX_SKB_FRAGS + 2;
711                 fifo->dev = dev;
712
713                 for (j = 0; j < page_num; j++) {
714                         int k = 0;
715                         dma_addr_t tmp_p;
716                         void *tmp_v;
717                         tmp_v = pci_alloc_consistent(nic->pdev,
718                                                      PAGE_SIZE, &tmp_p);
719                         if (!tmp_v) {
720                                 DBG_PRINT(INFO_DBG, "pci_alloc_consistent ");
721                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
722                                 return -ENOMEM;
723                         }
724                         /* If we got a zero DMA address(can happen on
725                          * certain platforms like PPC), reallocate.
726                          * Store virtual address of page we don't want,
727                          * to be freed later.
728                          */
729                         if (!tmp_p) {
730                                 mac_control->zerodma_virt_addr = tmp_v;
731                                 DBG_PRINT(INIT_DBG,
732                                           "%s: Zero DMA address for TxDL. ",
733                                           dev->name);
734                                 DBG_PRINT(INIT_DBG,
735                                           "Virtual address %p\n", tmp_v);
736                                 tmp_v = pci_alloc_consistent(nic->pdev,
737                                                              PAGE_SIZE, &tmp_p);
738                                 if (!tmp_v) {
739                                         DBG_PRINT(INFO_DBG,
740                                                   "pci_alloc_consistent ");
741                                         DBG_PRINT(INFO_DBG,
742                                                   "failed for TxDL\n");
743                                         return -ENOMEM;
744                                 }
745                                 mem_allocated += PAGE_SIZE;
746                         }
747                         while (k < lst_per_page) {
748                                 int l = (j * lst_per_page) + k;
749                                 if (l == tx_cfg->fifo_len)
750                                         break;
751                                 fifo->list_info[l].list_virt_addr =
752                                         tmp_v + (k * lst_size);
753                                 fifo->list_info[l].list_phy_addr =
754                                         tmp_p + (k * lst_size);
755                                 k++;
756                         }
757                 }
758         }
759
760         for (i = 0; i < config->tx_fifo_num; i++) {
761                 struct fifo_info *fifo = &mac_control->fifos[i];
762                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
763
764                 size = tx_cfg->fifo_len;
765                 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
766                 if (!fifo->ufo_in_band_v)
767                         return -ENOMEM;
768                 mem_allocated += (size * sizeof(u64));
769         }
770
771         /* Allocation and initialization of RXDs in Rings */
772         size = 0;
773         for (i = 0; i < config->rx_ring_num; i++) {
774                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
775                 struct ring_info *ring = &mac_control->rings[i];
776
777                 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
778                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
779                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
780                         DBG_PRINT(ERR_DBG, "RxDs per Block");
781                         return FAILURE;
782                 }
783                 size += rx_cfg->num_rxd;
784                 ring->block_count = rx_cfg->num_rxd /
785                         (rxd_count[nic->rxd_mode] + 1);
786                 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
787         }
788         if (nic->rxd_mode == RXD_MODE_1)
789                 size = (size * (sizeof(struct RxD1)));
790         else
791                 size = (size * (sizeof(struct RxD3)));
792
793         for (i = 0; i < config->rx_ring_num; i++) {
794                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
795                 struct ring_info *ring = &mac_control->rings[i];
796
797                 ring->rx_curr_get_info.block_index = 0;
798                 ring->rx_curr_get_info.offset = 0;
799                 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
800                 ring->rx_curr_put_info.block_index = 0;
801                 ring->rx_curr_put_info.offset = 0;
802                 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
803                 ring->nic = nic;
804                 ring->ring_no = i;
805                 ring->lro = lro_enable;
806
807                 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
808                 /*  Allocating all the Rx blocks */
809                 for (j = 0; j < blk_cnt; j++) {
810                         struct rx_block_info *rx_blocks;
811                         int l;
812
813                         rx_blocks = &ring->rx_blocks[j];
814                         size = SIZE_OF_BLOCK;   /* size is always page size */
815                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
816                                                           &tmp_p_addr);
817                         if (tmp_v_addr == NULL) {
818                                 /*
819                                  * In case of failure, free_shared_mem()
820                                  * is called, which should free any
821                                  * memory that was alloced till the
822                                  * failure happened.
823                                  */
824                                 rx_blocks->block_virt_addr = tmp_v_addr;
825                                 return -ENOMEM;
826                         }
827                         mem_allocated += size;
828                         memset(tmp_v_addr, 0, size);
829
830                         size = sizeof(struct rxd_info) *
831                                 rxd_count[nic->rxd_mode];
832                         rx_blocks->block_virt_addr = tmp_v_addr;
833                         rx_blocks->block_dma_addr = tmp_p_addr;
834                         rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
835                         if (!rx_blocks->rxds)
836                                 return -ENOMEM;
837                         mem_allocated += size;
838                         for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
839                                 rx_blocks->rxds[l].virt_addr =
840                                         rx_blocks->block_virt_addr +
841                                         (rxd_size[nic->rxd_mode] * l);
842                                 rx_blocks->rxds[l].dma_addr =
843                                         rx_blocks->block_dma_addr +
844                                         (rxd_size[nic->rxd_mode] * l);
845                         }
846                 }
847                 /* Interlinking all Rx Blocks */
848                 for (j = 0; j < blk_cnt; j++) {
849                         int next = (j + 1) % blk_cnt;
850                         tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
851                         tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
852                         tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
853                         tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
854
855                         pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
856                         pre_rxd_blk->reserved_2_pNext_RxD_block =
857                                 (unsigned long)tmp_v_addr_next;
858                         pre_rxd_blk->pNext_RxD_Blk_physical =
859                                 (u64)tmp_p_addr_next;
860                 }
861         }
862         if (nic->rxd_mode == RXD_MODE_3B) {
863                 /*
864                  * Allocation of Storages for buffer addresses in 2BUFF mode
865                  * and the buffers as well.
866                  */
867                 for (i = 0; i < config->rx_ring_num; i++) {
868                         struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
869                         struct ring_info *ring = &mac_control->rings[i];
870
871                         blk_cnt = rx_cfg->num_rxd /
872                                 (rxd_count[nic->rxd_mode] + 1);
873                         size = sizeof(struct buffAdd *) * blk_cnt;
874                         ring->ba = kmalloc(size, GFP_KERNEL);
875                         if (!ring->ba)
876                                 return -ENOMEM;
877                         mem_allocated += size;
878                         for (j = 0; j < blk_cnt; j++) {
879                                 int k = 0;
880
881                                 size = sizeof(struct buffAdd) *
882                                         (rxd_count[nic->rxd_mode] + 1);
883                                 ring->ba[j] = kmalloc(size, GFP_KERNEL);
884                                 if (!ring->ba[j])
885                                         return -ENOMEM;
886                                 mem_allocated += size;
887                                 while (k != rxd_count[nic->rxd_mode]) {
888                                         ba = &ring->ba[j][k];
889                                         size = BUF0_LEN + ALIGN_SIZE;
890                                         ba->ba_0_org = kmalloc(size, GFP_KERNEL);
891                                         if (!ba->ba_0_org)
892                                                 return -ENOMEM;
893                                         mem_allocated += size;
894                                         tmp = (unsigned long)ba->ba_0_org;
895                                         tmp += ALIGN_SIZE;
896                                         tmp &= ~((unsigned long)ALIGN_SIZE);
897                                         ba->ba_0 = (void *)tmp;
898
899                                         size = BUF1_LEN + ALIGN_SIZE;
900                                         ba->ba_1_org = kmalloc(size, GFP_KERNEL);
901                                         if (!ba->ba_1_org)
902                                                 return -ENOMEM;
903                                         mem_allocated += size;
904                                         tmp = (unsigned long)ba->ba_1_org;
905                                         tmp += ALIGN_SIZE;
906                                         tmp &= ~((unsigned long)ALIGN_SIZE);
907                                         ba->ba_1 = (void *)tmp;
908                                         k++;
909                                 }
910                         }
911                 }
912         }
913
914         /* Allocation and initialization of Statistics block */
915         size = sizeof(struct stat_block);
916         mac_control->stats_mem =
917                 pci_alloc_consistent(nic->pdev, size,
918                                      &mac_control->stats_mem_phy);
919
920         if (!mac_control->stats_mem) {
921                 /*
922                  * In case of failure, free_shared_mem() is called, which
923                  * should free any memory that was alloced till the
924                  * failure happened.
925                  */
926                 return -ENOMEM;
927         }
928         mem_allocated += size;
929         mac_control->stats_mem_sz = size;
930
931         tmp_v_addr = mac_control->stats_mem;
932         mac_control->stats_info = (struct stat_block *)tmp_v_addr;
933         memset(tmp_v_addr, 0, size);
934         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
935                   (unsigned long long)tmp_p_addr);
936         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
937         return SUCCESS;
938 }
939
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * It walks the same structures in the same order as init_shared_mem():
 * TxD list pages, Rx block rings, 2-buffer-mode buffer storage, per-fifo
 * UFO scratch arrays and finally the statistics block.  Each free is
 * mirrored into sw_stat.mem_freed for the driver's memory accounting.
 * Safe to call after a partially failed init: every branch tolerates
 * NULL/zero entries left behind by an aborted allocation pass.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Recompute the TxD-list page layout exactly as init_shared_mem()
	 * did, so the same number of DMA pages is released per fifo.
	 */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* list_info itself never got allocated: nothing at
			 * all was set up for the Tx side, bail out entirely.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks where allocation stopped. */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		kfree(fifo->list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			nic->config.tx_cfg[i].fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx block DMA pages and their per-RxD bookkeeping. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			/* NULL marks the first block that was never
			 * allocated; later blocks cannot exist either.
			 */
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				sizeof(struct rxd_info) * rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* Free the unaligned originals;
					 * ba_0/ba_1 are aligned views into
					 * these allocations, not separate
					 * memory.
					 */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(ring->ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					 (rxd_count[nic->rxd_mode] + 1));
			}
			kfree(ring->ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Per-fifo UFO in-band scratch arrays (see init_shared_mem()). */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (tx_cfg->fifo_len * sizeof(u64));
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* The stats block holds sw_stat itself, so account for it before
	 * the pci_free_consistent() that releases it.
	 */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1083
1084 /**
1085  * s2io_verify_pci_mode -
1086  */
1087
1088 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1089 {
1090         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1091         register u64 val64 = 0;
1092         int     mode;
1093
1094         val64 = readq(&bar0->pci_mode);
1095         mode = (u8)GET_PCI_MODE(val64);
1096
1097         if (val64 & PCI_MODE_UNKNOWN_MODE)
1098                 return -1;      /* Unknown PCI mode */
1099         return mode;
1100 }
1101
1102 #define NEC_VENID   0x1033
1103 #define NEC_DEVID   0x0125
1104 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1105 {
1106         struct pci_dev *tdev = NULL;
1107         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1108                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1109                         if (tdev->bus == s2io_pdev->bus->parent) {
1110                                 pci_dev_put(tdev);
1111                                 return 1;
1112                         }
1113                 }
1114         }
1115         return 0;
1116 }
1117
/* Bus clock in MHz, indexed by the PCI mode value decoded by
 * GET_PCI_MODE() — see the PCI_MODE_* cases in s2io_print_pci_mode().
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1119 /**
1120  * s2io_print_pci_mode -
1121  */
1122 static int s2io_print_pci_mode(struct s2io_nic *nic)
1123 {
1124         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1125         register u64 val64 = 0;
1126         int     mode;
1127         struct config_param *config = &nic->config;
1128
1129         val64 = readq(&bar0->pci_mode);
1130         mode = (u8)GET_PCI_MODE(val64);
1131
1132         if (val64 & PCI_MODE_UNKNOWN_MODE)
1133                 return -1;      /* Unknown PCI mode */
1134
1135         config->bus_speed = bus_speed[mode];
1136
1137         if (s2io_on_nec_bridge(nic->pdev)) {
1138                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1139                           nic->dev->name);
1140                 return mode;
1141         }
1142
1143         DBG_PRINT(ERR_DBG, "%s: Device is on %d bit ",
1144                   nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64);
1145
1146         switch (mode) {
1147         case PCI_MODE_PCI_33:
1148                 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1149                 break;
1150         case PCI_MODE_PCI_66:
1151                 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1152                 break;
1153         case PCI_MODE_PCIX_M1_66:
1154                 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1155                 break;
1156         case PCI_MODE_PCIX_M1_100:
1157                 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1158                 break;
1159         case PCI_MODE_PCIX_M1_133:
1160                 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1161                 break;
1162         case PCI_MODE_PCIX_M2_66:
1163                 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1164                 break;
1165         case PCI_MODE_PCIX_M2_100:
1166                 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1167                 break;
1168         case PCI_MODE_PCIX_M2_133:
1169                 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1170                 break;
1171         default:
1172                 return -1;      /* Unsupported bus speed */
1173         }
1174
1175         return mode;
1176 }
1177
/**
 *  init_tti - Initialization transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures transmit traffic interrupts
 *  by programming one TTI memory entry per Tx FIFO: data1 carries the
 *  timer value and utilization ranges, data2 the utilization-frequency
 *  counts; each entry is then committed through tti_command_mem and the
 *  strobe polled until the hardware accepts it.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: derive the timer from the measured
			 * bus speed instead of using a fixed constant.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts are only armed on FIFO 0, and only
		 * while the link is up (and the module parameter allows it).
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With INTA and default Tx steering, the FIFOs in
			 * the UDP range get different (larger A/B) UFC
			 * thresholds than the remaining FIFOs.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit entry i: write-enable + strobe, then wait for the
		 * hardware to clear the strobe bit before moving on.
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1256
1257 /**
1258  *  init_nic - Initialization of hardware
1259  *  @nic: device private variable
1260  *  Description: The function sequentially configures every block
1261  *  of the H/W from their reset values.
1262  *  Return Value:  SUCCESS on success and
1263  *  '-1' on failure (endian settings incorrect).
1264  */
1265
1266 static int init_nic(struct s2io_nic *nic)
1267 {
1268         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1269         struct net_device *dev = nic->dev;
1270         register u64 val64 = 0;
1271         void __iomem *add;
1272         u32 time;
1273         int i, j;
1274         struct mac_info *mac_control;
1275         struct config_param *config;
1276         int dtx_cnt = 0;
1277         unsigned long long mem_share;
1278         int mem_size;
1279
1280         mac_control = &nic->mac_control;
1281         config = &nic->config;
1282
1283         /* to set the swapper controle on the card */
1284         if (s2io_set_swapper(nic)) {
1285                 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1286                 return -EIO;
1287         }
1288
1289         /*
1290          * Herc requires EOI to be removed from reset before XGXS, so..
1291          */
1292         if (nic->device_type & XFRAME_II_DEVICE) {
1293                 val64 = 0xA500000000ULL;
1294                 writeq(val64, &bar0->sw_reset);
1295                 msleep(500);
1296                 val64 = readq(&bar0->sw_reset);
1297         }
1298
1299         /* Remove XGXS from reset state */
1300         val64 = 0;
1301         writeq(val64, &bar0->sw_reset);
1302         msleep(500);
1303         val64 = readq(&bar0->sw_reset);
1304
1305         /* Ensure that it's safe to access registers by checking
1306          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1307          */
1308         if (nic->device_type == XFRAME_II_DEVICE) {
1309                 for (i = 0; i < 50; i++) {
1310                         val64 = readq(&bar0->adapter_status);
1311                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1312                                 break;
1313                         msleep(10);
1314                 }
1315                 if (i == 50)
1316                         return -ENODEV;
1317         }
1318
1319         /*  Enable Receiving broadcasts */
1320         add = &bar0->mac_cfg;
1321         val64 = readq(&bar0->mac_cfg);
1322         val64 |= MAC_RMAC_BCAST_ENABLE;
1323         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1324         writel((u32)val64, add);
1325         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1326         writel((u32) (val64 >> 32), (add + 4));
1327
1328         /* Read registers in all blocks */
1329         val64 = readq(&bar0->mac_int_mask);
1330         val64 = readq(&bar0->mc_int_mask);
1331         val64 = readq(&bar0->xgxs_int_mask);
1332
1333         /*  Set MTU */
1334         val64 = dev->mtu;
1335         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1336
1337         if (nic->device_type & XFRAME_II_DEVICE) {
1338                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1339                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1340                                           &bar0->dtx_control, UF);
1341                         if (dtx_cnt & 0x1)
1342                                 msleep(1); /* Necessary!! */
1343                         dtx_cnt++;
1344                 }
1345         } else {
1346                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1347                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1348                                           &bar0->dtx_control, UF);
1349                         val64 = readq(&bar0->dtx_control);
1350                         dtx_cnt++;
1351                 }
1352         }
1353
1354         /*  Tx DMA Initialization */
1355         val64 = 0;
1356         writeq(val64, &bar0->tx_fifo_partition_0);
1357         writeq(val64, &bar0->tx_fifo_partition_1);
1358         writeq(val64, &bar0->tx_fifo_partition_2);
1359         writeq(val64, &bar0->tx_fifo_partition_3);
1360
1361         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1362                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1363
1364                 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1365                         vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1366
1367                 if (i == (config->tx_fifo_num - 1)) {
1368                         if (i % 2 == 0)
1369                                 i++;
1370                 }
1371
1372                 switch (i) {
1373                 case 1:
1374                         writeq(val64, &bar0->tx_fifo_partition_0);
1375                         val64 = 0;
1376                         j = 0;
1377                         break;
1378                 case 3:
1379                         writeq(val64, &bar0->tx_fifo_partition_1);
1380                         val64 = 0;
1381                         j = 0;
1382                         break;
1383                 case 5:
1384                         writeq(val64, &bar0->tx_fifo_partition_2);
1385                         val64 = 0;
1386                         j = 0;
1387                         break;
1388                 case 7:
1389                         writeq(val64, &bar0->tx_fifo_partition_3);
1390                         val64 = 0;
1391                         j = 0;
1392                         break;
1393                 default:
1394                         j++;
1395                         break;
1396                 }
1397         }
1398
1399         /*
1400          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1401          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1402          */
1403         if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1404                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1405
1406         val64 = readq(&bar0->tx_fifo_partition_0);
1407         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1408                   &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1409
1410         /*
1411          * Initialization of Tx_PA_CONFIG register to ignore packet
1412          * integrity checking.
1413          */
1414         val64 = readq(&bar0->tx_pa_cfg);
1415         val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1416                 TX_PA_CFG_IGNORE_SNAP_OUI |
1417                 TX_PA_CFG_IGNORE_LLC_CTRL |
1418                 TX_PA_CFG_IGNORE_L2_ERR;
1419         writeq(val64, &bar0->tx_pa_cfg);
1420
1421         /* Rx DMA intialization. */
1422         val64 = 0;
1423         for (i = 0; i < config->rx_ring_num; i++) {
1424                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1425
1426                 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1427         }
1428         writeq(val64, &bar0->rx_queue_priority);
1429
1430         /*
1431          * Allocating equal share of memory to all the
1432          * configured Rings.
1433          */
1434         val64 = 0;
1435         if (nic->device_type & XFRAME_II_DEVICE)
1436                 mem_size = 32;
1437         else
1438                 mem_size = 64;
1439
1440         for (i = 0; i < config->rx_ring_num; i++) {
1441                 switch (i) {
1442                 case 0:
1443                         mem_share = (mem_size / config->rx_ring_num +
1444                                      mem_size % config->rx_ring_num);
1445                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1446                         continue;
1447                 case 1:
1448                         mem_share = (mem_size / config->rx_ring_num);
1449                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1450                         continue;
1451                 case 2:
1452                         mem_share = (mem_size / config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1454                         continue;
1455                 case 3:
1456                         mem_share = (mem_size / config->rx_ring_num);
1457                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1458                         continue;
1459                 case 4:
1460                         mem_share = (mem_size / config->rx_ring_num);
1461                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1462                         continue;
1463                 case 5:
1464                         mem_share = (mem_size / config->rx_ring_num);
1465                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1466                         continue;
1467                 case 6:
1468                         mem_share = (mem_size / config->rx_ring_num);
1469                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1470                         continue;
1471                 case 7:
1472                         mem_share = (mem_size / config->rx_ring_num);
1473                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1474                         continue;
1475                 }
1476         }
1477         writeq(val64, &bar0->rx_queue_cfg);
1478
1479         /*
1480          * Filling Tx round robin registers
1481          * as per the number of FIFOs for equal scheduling priority
1482          */
1483         switch (config->tx_fifo_num) {
1484         case 1:
1485                 val64 = 0x0;
1486                 writeq(val64, &bar0->tx_w_round_robin_0);
1487                 writeq(val64, &bar0->tx_w_round_robin_1);
1488                 writeq(val64, &bar0->tx_w_round_robin_2);
1489                 writeq(val64, &bar0->tx_w_round_robin_3);
1490                 writeq(val64, &bar0->tx_w_round_robin_4);
1491                 break;
1492         case 2:
1493                 val64 = 0x0001000100010001ULL;
1494                 writeq(val64, &bar0->tx_w_round_robin_0);
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 writeq(val64, &bar0->tx_w_round_robin_3);
1498                 val64 = 0x0001000100000000ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_4);
1500                 break;
1501         case 3:
1502                 val64 = 0x0001020001020001ULL;
1503                 writeq(val64, &bar0->tx_w_round_robin_0);
1504                 val64 = 0x0200010200010200ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_1);
1506                 val64 = 0x0102000102000102ULL;
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 val64 = 0x0001020001020001ULL;
1509                 writeq(val64, &bar0->tx_w_round_robin_3);
1510                 val64 = 0x0200010200000000ULL;
1511                 writeq(val64, &bar0->tx_w_round_robin_4);
1512                 break;
1513         case 4:
1514                 val64 = 0x0001020300010203ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_0);
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 writeq(val64, &bar0->tx_w_round_robin_2);
1518                 writeq(val64, &bar0->tx_w_round_robin_3);
1519                 val64 = 0x0001020300000000ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_4);
1521                 break;
1522         case 5:
1523                 val64 = 0x0001020304000102ULL;
1524                 writeq(val64, &bar0->tx_w_round_robin_0);
1525                 val64 = 0x0304000102030400ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_1);
1527                 val64 = 0x0102030400010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_2);
1529                 val64 = 0x0400010203040001ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_3);
1531                 val64 = 0x0203040000000000ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_4);
1533                 break;
1534         case 6:
1535                 val64 = 0x0001020304050001ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_0);
1537                 val64 = 0x0203040500010203ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_1);
1539                 val64 = 0x0405000102030405ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_2);
1541                 val64 = 0x0001020304050001ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_3);
1543                 val64 = 0x0203040500000000ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_4);
1545                 break;
1546         case 7:
1547                 val64 = 0x0001020304050600ULL;
1548                 writeq(val64, &bar0->tx_w_round_robin_0);
1549                 val64 = 0x0102030405060001ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_1);
1551                 val64 = 0x0203040506000102ULL;
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 val64 = 0x0304050600010203ULL;
1554                 writeq(val64, &bar0->tx_w_round_robin_3);
1555                 val64 = 0x0405060000000000ULL;
1556                 writeq(val64, &bar0->tx_w_round_robin_4);
1557                 break;
1558         case 8:
1559                 val64 = 0x0001020304050607ULL;
1560                 writeq(val64, &bar0->tx_w_round_robin_0);
1561                 writeq(val64, &bar0->tx_w_round_robin_1);
1562                 writeq(val64, &bar0->tx_w_round_robin_2);
1563                 writeq(val64, &bar0->tx_w_round_robin_3);
1564                 val64 = 0x0001020300000000ULL;
1565                 writeq(val64, &bar0->tx_w_round_robin_4);
1566                 break;
1567         }
1568
1569         /* Enable all configured Tx FIFO partitions */
1570         val64 = readq(&bar0->tx_fifo_partition_0);
1571         val64 |= (TX_FIFO_PARTITION_EN);
1572         writeq(val64, &bar0->tx_fifo_partition_0);
1573
1574         /* Filling the Rx round robin registers as per the
1575          * number of Rings and steering based on QoS with
1576          * equal priority.
1577          */
1578         switch (config->rx_ring_num) {
1579         case 1:
1580                 val64 = 0x0;
1581                 writeq(val64, &bar0->rx_w_round_robin_0);
1582                 writeq(val64, &bar0->rx_w_round_robin_1);
1583                 writeq(val64, &bar0->rx_w_round_robin_2);
1584                 writeq(val64, &bar0->rx_w_round_robin_3);
1585                 writeq(val64, &bar0->rx_w_round_robin_4);
1586
1587                 val64 = 0x8080808080808080ULL;
1588                 writeq(val64, &bar0->rts_qos_steering);
1589                 break;
1590         case 2:
1591                 val64 = 0x0001000100010001ULL;
1592                 writeq(val64, &bar0->rx_w_round_robin_0);
1593                 writeq(val64, &bar0->rx_w_round_robin_1);
1594                 writeq(val64, &bar0->rx_w_round_robin_2);
1595                 writeq(val64, &bar0->rx_w_round_robin_3);
1596                 val64 = 0x0001000100000000ULL;
1597                 writeq(val64, &bar0->rx_w_round_robin_4);
1598
1599                 val64 = 0x8080808040404040ULL;
1600                 writeq(val64, &bar0->rts_qos_steering);
1601                 break;
1602         case 3:
1603                 val64 = 0x0001020001020001ULL;
1604                 writeq(val64, &bar0->rx_w_round_robin_0);
1605                 val64 = 0x0200010200010200ULL;
1606                 writeq(val64, &bar0->rx_w_round_robin_1);
1607                 val64 = 0x0102000102000102ULL;
1608                 writeq(val64, &bar0->rx_w_round_robin_2);
1609                 val64 = 0x0001020001020001ULL;
1610                 writeq(val64, &bar0->rx_w_round_robin_3);
1611                 val64 = 0x0200010200000000ULL;
1612                 writeq(val64, &bar0->rx_w_round_robin_4);
1613
1614                 val64 = 0x8080804040402020ULL;
1615                 writeq(val64, &bar0->rts_qos_steering);
1616                 break;
1617         case 4:
1618                 val64 = 0x0001020300010203ULL;
1619                 writeq(val64, &bar0->rx_w_round_robin_0);
1620                 writeq(val64, &bar0->rx_w_round_robin_1);
1621                 writeq(val64, &bar0->rx_w_round_robin_2);
1622                 writeq(val64, &bar0->rx_w_round_robin_3);
1623                 val64 = 0x0001020300000000ULL;
1624                 writeq(val64, &bar0->rx_w_round_robin_4);
1625
1626                 val64 = 0x8080404020201010ULL;
1627                 writeq(val64, &bar0->rts_qos_steering);
1628                 break;
1629         case 5:
1630                 val64 = 0x0001020304000102ULL;
1631                 writeq(val64, &bar0->rx_w_round_robin_0);
1632                 val64 = 0x0304000102030400ULL;
1633                 writeq(val64, &bar0->rx_w_round_robin_1);
1634                 val64 = 0x0102030400010203ULL;
1635                 writeq(val64, &bar0->rx_w_round_robin_2);
1636                 val64 = 0x0400010203040001ULL;
1637                 writeq(val64, &bar0->rx_w_round_robin_3);
1638                 val64 = 0x0203040000000000ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_4);
1640
1641                 val64 = 0x8080404020201008ULL;
1642                 writeq(val64, &bar0->rts_qos_steering);
1643                 break;
1644         case 6:
1645                 val64 = 0x0001020304050001ULL;
1646                 writeq(val64, &bar0->rx_w_round_robin_0);
1647                 val64 = 0x0203040500010203ULL;
1648                 writeq(val64, &bar0->rx_w_round_robin_1);
1649                 val64 = 0x0405000102030405ULL;
1650                 writeq(val64, &bar0->rx_w_round_robin_2);
1651                 val64 = 0x0001020304050001ULL;
1652                 writeq(val64, &bar0->rx_w_round_robin_3);
1653                 val64 = 0x0203040500000000ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_4);
1655
1656                 val64 = 0x8080404020100804ULL;
1657                 writeq(val64, &bar0->rts_qos_steering);
1658                 break;
1659         case 7:
1660                 val64 = 0x0001020304050600ULL;
1661                 writeq(val64, &bar0->rx_w_round_robin_0);
1662                 val64 = 0x0102030405060001ULL;
1663                 writeq(val64, &bar0->rx_w_round_robin_1);
1664                 val64 = 0x0203040506000102ULL;
1665                 writeq(val64, &bar0->rx_w_round_robin_2);
1666                 val64 = 0x0304050600010203ULL;
1667                 writeq(val64, &bar0->rx_w_round_robin_3);
1668                 val64 = 0x0405060000000000ULL;
1669                 writeq(val64, &bar0->rx_w_round_robin_4);
1670
1671                 val64 = 0x8080402010080402ULL;
1672                 writeq(val64, &bar0->rts_qos_steering);
1673                 break;
1674         case 8:
1675                 val64 = 0x0001020304050607ULL;
1676                 writeq(val64, &bar0->rx_w_round_robin_0);
1677                 writeq(val64, &bar0->rx_w_round_robin_1);
1678                 writeq(val64, &bar0->rx_w_round_robin_2);
1679                 writeq(val64, &bar0->rx_w_round_robin_3);
1680                 val64 = 0x0001020300000000ULL;
1681                 writeq(val64, &bar0->rx_w_round_robin_4);
1682
1683                 val64 = 0x8040201008040201ULL;
1684                 writeq(val64, &bar0->rts_qos_steering);
1685                 break;
1686         }
1687
1688         /* UDP Fix */
1689         val64 = 0;
1690         for (i = 0; i < 8; i++)
1691                 writeq(val64, &bar0->rts_frm_len_n[i]);
1692
1693         /* Set the default rts frame length for the rings configured */
1694         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1695         for (i = 0 ; i < config->rx_ring_num ; i++)
1696                 writeq(val64, &bar0->rts_frm_len_n[i]);
1697
1698         /* Set the frame length for the configured rings
1699          * desired by the user
1700          */
1701         for (i = 0; i < config->rx_ring_num; i++) {
1702                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1703                  * specified frame length steering.
1704                  * If the user provides the frame length then program
1705                  * the rts_frm_len register for those values or else
1706                  * leave it as it is.
1707                  */
1708                 if (rts_frm_len[i] != 0) {
1709                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1710                                &bar0->rts_frm_len_n[i]);
1711                 }
1712         }
1713
1714         /* Disable differentiated services steering logic */
1715         for (i = 0; i < 64; i++) {
1716                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1717                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1718                                   dev->name);
1719                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1720                         return -ENODEV;
1721                 }
1722         }
1723
1724         /* Program statistics memory */
1725         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1726
1727         if (nic->device_type == XFRAME_II_DEVICE) {
1728                 val64 = STAT_BC(0x320);
1729                 writeq(val64, &bar0->stat_byte_cnt);
1730         }
1731
1732         /*
1733          * Initializing the sampling rate for the device to calculate the
1734          * bandwidth utilization.
1735          */
1736         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1737                 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1738         writeq(val64, &bar0->mac_link_util);
1739
1740         /*
1741          * Initializing the Transmit and Receive Traffic Interrupt
1742          * Scheme.
1743          */
1744
1745         /* Initialize TTI */
1746         if (SUCCESS != init_tti(nic, nic->last_link_state))
1747                 return -ENODEV;
1748
1749         /* RTI Initialization */
1750         if (nic->device_type == XFRAME_II_DEVICE) {
1751                 /*
1752                  * Programmed to generate Apprx 500 Intrs per
1753                  * second
1754                  */
1755                 int count = (nic->config.bus_speed * 125)/4;
1756                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1757         } else
1758                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1759         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1760                 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1761                 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1762                 RTI_DATA1_MEM_RX_TIMER_AC_EN;
1763
1764         writeq(val64, &bar0->rti_data1_mem);
1765
1766         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1767                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1768         if (nic->config.intr_type == MSI_X)
1769                 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1770                           RTI_DATA2_MEM_RX_UFC_D(0x40));
1771         else
1772                 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1773                           RTI_DATA2_MEM_RX_UFC_D(0x80));
1774         writeq(val64, &bar0->rti_data2_mem);
1775
1776         for (i = 0; i < config->rx_ring_num; i++) {
1777                 val64 = RTI_CMD_MEM_WE |
1778                         RTI_CMD_MEM_STROBE_NEW_CMD |
1779                         RTI_CMD_MEM_OFFSET(i);
1780                 writeq(val64, &bar0->rti_command_mem);
1781
1782                 /*
1783                  * Once the operation completes, the Strobe bit of the
1784                  * command register will be reset. We poll for this
1785                  * particular condition. We wait for a maximum of 500ms
1786                  * for the operation to complete, if it's not complete
1787                  * by then we return error.
1788                  */
1789                 time = 0;
1790                 while (true) {
1791                         val64 = readq(&bar0->rti_command_mem);
1792                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1793                                 break;
1794
1795                         if (time > 10) {
1796                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1797                                           dev->name);
1798                                 return -ENODEV;
1799                         }
1800                         time++;
1801                         msleep(50);
1802                 }
1803         }
1804
1805         /*
1806          * Initializing proper values as Pause threshold into all
1807          * the 8 Queues on Rx side.
1808          */
1809         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1810         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1811
1812         /* Disable RMAC PAD STRIPPING */
1813         add = &bar0->mac_cfg;
1814         val64 = readq(&bar0->mac_cfg);
1815         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1816         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1817         writel((u32) (val64), add);
1818         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1819         writel((u32) (val64 >> 32), (add + 4));
1820         val64 = readq(&bar0->mac_cfg);
1821
1822         /* Enable FCS stripping by adapter */
1823         add = &bar0->mac_cfg;
1824         val64 = readq(&bar0->mac_cfg);
1825         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1826         if (nic->device_type == XFRAME_II_DEVICE)
1827                 writeq(val64, &bar0->mac_cfg);
1828         else {
1829                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1830                 writel((u32) (val64), add);
1831                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1832                 writel((u32) (val64 >> 32), (add + 4));
1833         }
1834
1835         /*
1836          * Set the time value to be inserted in the pause frame
1837          * generated by xena.
1838          */
1839         val64 = readq(&bar0->rmac_pause_cfg);
1840         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1841         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1842         writeq(val64, &bar0->rmac_pause_cfg);
1843
1844         /*
1845          * Set the Threshold Limit for Generating the pause frame
1846          * If the amount of data in any Queue exceeds ratio of
1847          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1848          * pause frame is generated
1849          */
1850         val64 = 0;
1851         for (i = 0; i < 4; i++) {
1852                 val64 |= (((u64)0xFF00 |
1853                            nic->mac_control.mc_pause_threshold_q0q3)
1854                           << (i * 2 * 8));
1855         }
1856         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1857
1858         val64 = 0;
1859         for (i = 0; i < 4; i++) {
1860                 val64 |= (((u64)0xFF00 |
1861                            nic->mac_control.mc_pause_threshold_q4q7)
1862                           << (i * 2 * 8));
1863         }
1864         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1865
1866         /*
1867          * TxDMA will stop Read request if the number of read split has
1868          * exceeded the limit pointed by shared_splits
1869          */
1870         val64 = readq(&bar0->pic_control);
1871         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1872         writeq(val64, &bar0->pic_control);
1873
1874         if (nic->config.bus_speed == 266) {
1875                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1876                 writeq(0x0, &bar0->read_retry_delay);
1877                 writeq(0x0, &bar0->write_retry_delay);
1878         }
1879
1880         /*
1881          * Programming the Herc to split every write transaction
1882          * that does not start on an ADB to reduce disconnects.
1883          */
1884         if (nic->device_type == XFRAME_II_DEVICE) {
1885                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1886                         MISC_LINK_STABILITY_PRD(3);
1887                 writeq(val64, &bar0->misc_control);
1888                 val64 = readq(&bar0->pic_control2);
1889                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1890                 writeq(val64, &bar0->pic_control2);
1891         }
1892         if (strstr(nic->product_name, "CX4")) {
1893                 val64 = TMAC_AVG_IPG(0x17);
1894                 writeq(val64, &bar0->tmac_avg_ipg);
1895         }
1896
1897         return SUCCESS;
1898 }
1899 #define LINK_UP_DOWN_INTERRUPT          1
1900 #define MAC_RMAC_ERR_TIMER              2
1901
1902 static int s2io_link_fault_indication(struct s2io_nic *nic)
1903 {
1904         if (nic->device_type == XFRAME_II_DEVICE)
1905                 return LINK_UP_DOWN_INTERRUPT;
1906         else
1907                 return MAC_RMAC_ERR_TIMER;
1908 }
1909
1910 /**
1911  *  do_s2io_write_bits -  update alarm bits in alarm register
1912  *  @value: alarm bits
1913  *  @flag: interrupt status
1914  *  @addr: address value
1915  *  Description: update alarm bits in alarm register
1916  *  Return Value:
1917  *  NONE.
1918  */
1919 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1920 {
1921         u64 temp64;
1922
1923         temp64 = readq(addr);
1924
1925         if (flag == ENABLE_INTRS)
1926                 temp64 &= ~((u64)value);
1927         else
1928                 temp64 |= ((u64)value);
1929         writeq(temp64, addr);
1930 }
1931
/*
 * en_dis_err_alarms - mask or unmask the error/alarm interrupt sources.
 * @nic: device private structure
 * @mask: bitwise OR of block selectors (TX_DMA_INTR, TX_MAC_INTR, ...)
 *        choosing which alarm blocks to update
 * @flag: ENABLE_INTRS to unmask the listed alarms, otherwise mask them
 *
 * For every block selected in @mask the per-block alarm mask registers are
 * updated via do_s2io_write_bits(), and the block's top-level bit is
 * accumulated into gen_int_mask for later use in the general interrupt mask.
 * All interrupts are masked at the top level for the duration of the update.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while individual masks change */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		/* Sub-block summary mask for the Tx DMA path */
		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Only devices using the RMAC error timer scheme report link
		 * state changes through this alarm register */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Deliberately force the cached top-level mask to 0 so that alarm
	 * interrupts stay disabled for now.
	 * Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2058
2059 /**
2060  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2061  *  @nic: device private variable,
2062  *  @mask: A mask indicating which Intr block must be modified and,
2063  *  @flag: A flag indicating whether to enable or disable the Intrs.
2064  *  Description: This function will either disable or enable the interrupts
2065  *  depending on the flag argument. The mask argument can be used to
2066  *  enable/disable any Intr block.
2067  *  Return Value: NONE.
2068  */
2069
2070 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2071 {
2072         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2073         register u64 temp64 = 0, intr_mask = 0;
2074
2075         intr_mask = nic->general_int_mask;
2076
2077         /*  Top level interrupt classification */
2078         /*  PIC Interrupts */
2079         if (mask & TX_PIC_INTR) {
2080                 /*  Enable PIC Intrs in the general intr mask register */
2081                 intr_mask |= TXPIC_INT_M;
2082                 if (flag == ENABLE_INTRS) {
2083                         /*
2084                          * If Hercules adapter enable GPIO otherwise
2085                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2086                          * interrupts for now.
2087                          * TODO
2088                          */
2089                         if (s2io_link_fault_indication(nic) ==
2090                             LINK_UP_DOWN_INTERRUPT) {
2091                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2092                                                    &bar0->pic_int_mask);
2093                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2094                                                    &bar0->gpio_int_mask);
2095                         } else
2096                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2097                 } else if (flag == DISABLE_INTRS) {
2098                         /*
2099                          * Disable PIC Intrs in the general
2100                          * intr mask register
2101                          */
2102                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2103                 }
2104         }
2105
2106         /*  Tx traffic interrupts */
2107         if (mask & TX_TRAFFIC_INTR) {
2108                 intr_mask |= TXTRAFFIC_INT_M;
2109                 if (flag == ENABLE_INTRS) {
2110                         /*
2111                          * Enable all the Tx side interrupts
2112                          * writing 0 Enables all 64 TX interrupt levels
2113                          */
2114                         writeq(0x0, &bar0->tx_traffic_mask);
2115                 } else if (flag == DISABLE_INTRS) {
2116                         /*
2117                          * Disable Tx Traffic Intrs in the general intr mask
2118                          * register.
2119                          */
2120                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2121                 }
2122         }
2123
2124         /*  Rx traffic interrupts */
2125         if (mask & RX_TRAFFIC_INTR) {
2126                 intr_mask |= RXTRAFFIC_INT_M;
2127                 if (flag == ENABLE_INTRS) {
2128                         /* writing 0 Enables all 8 RX interrupt levels */
2129                         writeq(0x0, &bar0->rx_traffic_mask);
2130                 } else if (flag == DISABLE_INTRS) {
2131                         /*
2132                          * Disable Rx Traffic Intrs in the general intr mask
2133                          * register.
2134                          */
2135                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2136                 }
2137         }
2138
2139         temp64 = readq(&bar0->general_int_mask);
2140         if (flag == ENABLE_INTRS)
2141                 temp64 &= ~((u64)intr_mask);
2142         else
2143                 temp64 = DISABLE_ALL_INTRS;
2144         writeq(temp64, &bar0->general_int_mask);
2145
2146         nic->general_int_mask = readq(&bar0->general_int_mask);
2147 }
2148
2149 /**
2150  *  verify_pcc_quiescent- Checks for PCC quiescent state
2151  *  Return: 1 If PCC is quiescence
2152  *          0 If PCC is not quiescence
2153  */
2154 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2155 {
2156         int ret = 0, herc;
2157         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2158         u64 val64 = readq(&bar0->adapter_status);
2159
2160         herc = (sp->device_type == XFRAME_II_DEVICE);
2161
2162         if (flag == false) {
2163                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2164                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2165                                 ret = 1;
2166                 } else {
2167                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2168                                 ret = 1;
2169                 }
2170         } else {
2171                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2172                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2173                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2174                                 ret = 1;
2175                 } else {
2176                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2177                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2178                                 ret = 1;
2179                 }
2180         }
2181
2182         return ret;
2183 }
2184 /**
2185  *  verify_xena_quiescence - Checks whether the H/W is ready
2186  *  Description: Returns whether the H/W is ready to go or not. Depending
2187  *  on whether adapter enable bit was written or not the comparison
2188  *  differs and the calling function passes the input argument flag to
2189  *  indicate this.
2190  *  Return: 1 If xena is quiescence
2191  *          0 If Xena is not quiescence
2192  */
2193
2194 static int verify_xena_quiescence(struct s2io_nic *sp)
2195 {
2196         int  mode;
2197         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2198         u64 val64 = readq(&bar0->adapter_status);
2199         mode = s2io_verify_pci_mode(sp);
2200
2201         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2202                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2203                 return 0;
2204         }
2205         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2206                 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2207                 return 0;
2208         }
2209         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2210                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2211                 return 0;
2212         }
2213         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2214                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2215                 return 0;
2216         }
2217         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2218                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2219                 return 0;
2220         }
2221         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2222                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2223                 return 0;
2224         }
2225         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2226                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2227                 return 0;
2228         }
2229         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2230                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2231                 return 0;
2232         }
2233
2234         /*
2235          * In PCI 33 mode, the P_PLL is not used, and therefore,
2236          * the the P_PLL_LOCK bit in the adapter_status register will
2237          * not be asserted.
2238          */
2239         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2240             sp->device_type == XFRAME_II_DEVICE &&
2241             mode != PCI_MODE_PCI_33) {
2242                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2243                 return 0;
2244         }
2245         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2246               ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2247                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2248                 return 0;
2249         }
2250         return 1;
2251 }
2252
2253 /**
2254  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2255  * @sp: Pointer to device specifc structure
2256  * Description :
2257  * New procedure to clear mac address reading  problems on Alpha platforms
2258  *
2259  */
2260
2261 static void fix_mac_address(struct s2io_nic *sp)
2262 {
2263         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2264         u64 val64;
2265         int i = 0;
2266
2267         while (fix_mac[i] != END_SIGN) {
2268                 writeq(fix_mac[i++], &bar0->gpio_control);
2269                 udelay(10);
2270                 val64 = readq(&bar0->gpio_control);
2271         }
2272 }
2273
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the ring's PRC at the first Rx descriptor block */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear then program the RxD backoff interval field */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honor the module parameter that disables VLAN tag stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	/* Readback flushes the posted special write before the delay */
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): the original comment here said "Enabling ECC
	 * Protection", but the code CLEARS ADAPTER_ECC_EN. Verify whether
	 * the bit is active-low ("EN" meaning disable-on-set) or the
	 * comment was simply wrong before changing anything.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		/* Magic LED init sequence; 0x2700 is an undocumented offset
		 * past the mapped register struct — presumably an errata
		 * workaround register (TODO confirm against Xframe docs). */
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 *
 * Walks the TxD list starting at @txdlp, unmaps every DMA buffer it
 * owns (optional UFO in-band descriptor, linear data, then fragments),
 * zeroes the descriptors, and returns the skb so the caller can free it.
 * Returns NULL if the descriptor chain holds no skb.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A UFO transmit prepends one in-band descriptor whose
	 * Host_Control holds ufo_in_band_v; unmap it and step past. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control of the first real descriptor stores the skb pointer */
	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part: total length minus the paged (frag) bytes */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One descriptor per page fragment; a zero Buffer_Pointer
		 * marks the end of the populated descriptors. */
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole TxD list so it can be reused */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
2434
2435 /**
2436  *  free_tx_buffers - Free all queued Tx buffers
2437  *  @nic : device private variable.
2438  *  Description:
2439  *  Free all queued Tx buffers.
2440  *  Return Value: void
2441  */
2442
2443 static void free_tx_buffers(struct s2io_nic *nic)
2444 {
2445         struct net_device *dev = nic->dev;
2446         struct sk_buff *skb;
2447         struct TxD *txdp;
2448         int i, j;
2449         struct mac_info *mac_control;
2450         struct config_param *config;
2451         int cnt = 0;
2452
2453         mac_control = &nic->mac_control;
2454         config = &nic->config;
2455
2456         for (i = 0; i < config->tx_fifo_num; i++) {
2457                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2458                 struct fifo_info *fifo = &mac_control->fifos[i];
2459                 unsigned long flags;
2460
2461                 spin_lock_irqsave(&fifo->tx_lock, flags);
2462                 for (j = 0; j < tx_cfg->fifo_len; j++) {
2463                         txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2464                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2465                         if (skb) {
2466                                 nic->mac_control.stats_info->sw_stat.mem_freed
2467                                         += skb->truesize;
2468                                 dev_kfree_skb(skb);
2469                                 cnt++;
2470                         }
2471                 }
2472                 DBG_PRINT(INTR_DBG,
2473                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2474                           dev->name, cnt, i);
2475                 fifo->tx_curr_get_info.offset = 0;
2476                 fifo->tx_curr_put_info.offset = 0;
2477                 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2478         }
2479 }
2480
2481 /**
2482  *   stop_nic -  To stop the nic
2483  *   @nic ; device private variable.
2484  *   Description:
2485  *   This function does exactly the opposite of what the start_nic()
2486  *   function does. This function is called to stop the device.
2487  *   Return Value:
2488  *   void.
2489  */
2490
2491 static void stop_nic(struct s2io_nic *nic)
2492 {
2493         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2494         register u64 val64 = 0;
2495         u16 interruptible;
2496         struct mac_info *mac_control;
2497         struct config_param *config;
2498
2499         mac_control = &nic->mac_control;
2500         config = &nic->config;
2501
2502         /*  Disable all interrupts */
2503         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2504         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2505         interruptible |= TX_PIC_INTR;
2506         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2507
2508         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2509         val64 = readq(&bar0->adapter_control);
2510         val64 &= ~(ADAPTER_CNTL_EN);
2511         writeq(val64, &bar0->adapter_control);
2512 }
2513
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @ring_info: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* First descriptor refilled this call; its ownership handoff to
	 * the NIC is deferred until all later descriptors are ready. */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of buffers we still owe the ring */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never used in
		 * this function — presumably left over from an earlier
		 * version or debugging aid; confirm before removing. */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer has caught up with the get pointer and the
		 * descriptor is still occupied: the ring is full. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put", ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Reached the end of a block: advance (with wraparound) to
		 * the next block's first descriptor. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the NIC (and, in 3B mode,
		 * already armed): nothing more to refill right now. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand over whatever we did manage to refill before
			 * bailing out, so those buffers aren't stranded. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 holds the split-off header area (ba_0):
			 * map it fresh at card-up, otherwise just sync the
			 * existing mapping back to the device. */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping made above
						 * before bailing out. */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			/* s2BIT(0) marks this 3B descriptor as armed */
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Descriptors other than the batch leaders can be handed to
		 * the NIC immediately; leaders are deferred (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		/* NOTE(review): offset wraps at rxd_count + 1, while the
		 * block-advance test above fires at off == rxd_count —
		 * presumably each block has one extra slot (next-block
		 * link); confirm against the descriptor layout. */
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors: release ownership
		 * of the previous batch leader and start a new batch. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2750
2751 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2752 {
2753         struct net_device *dev = sp->dev;
2754         int j;
2755         struct sk_buff *skb;
2756         struct RxD_t *rxdp;
2757         struct mac_info *mac_control;
2758         struct buffAdd *ba;
2759         struct RxD1 *rxdp1;
2760         struct RxD3 *rxdp3;
2761
2762         mac_control = &sp->mac_control;
2763         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2764                 rxdp = mac_control->rings[ring_no].
2765                         rx_blocks[blk].rxds[j].virt_addr;
2766                 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2767                 if (!skb)
2768                         continue;
2769                 if (sp->rxd_mode == RXD_MODE_1) {
2770                         rxdp1 = (struct RxD1 *)rxdp;
2771                         pci_unmap_single(sp->pdev,
2772                                          (dma_addr_t)rxdp1->Buffer0_ptr,
2773                                          dev->mtu +
2774                                          HEADER_ETHERNET_II_802_3_SIZE +
2775                                          HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2776                                          PCI_DMA_FROMDEVICE);
2777                         memset(rxdp, 0, sizeof(struct RxD1));
2778                 } else if (sp->rxd_mode == RXD_MODE_3B) {
2779                         rxdp3 = (struct RxD3 *)rxdp;
2780                         ba = &mac_control->rings[ring_no].ba[blk][j];
2781                         pci_unmap_single(sp->pdev,
2782                                          (dma_addr_t)rxdp3->Buffer0_ptr,
2783                                          BUF0_LEN,
2784                                          PCI_DMA_FROMDEVICE);
2785                         pci_unmap_single(sp->pdev,
2786                                          (dma_addr_t)rxdp3->Buffer1_ptr,
2787                                          BUF1_LEN,
2788                                          PCI_DMA_FROMDEVICE);
2789                         pci_unmap_single(sp->pdev,
2790                                          (dma_addr_t)rxdp3->Buffer2_ptr,
2791                                          dev->mtu + 4,
2792                                          PCI_DMA_FROMDEVICE);
2793                         memset(rxdp, 0, sizeof(struct RxD3));
2794                 }
2795                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2796                 dev_kfree_skb(skb);
2797                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2798         }
2799 }
2800
2801 /**
2802  *  free_rx_buffers - Frees all Rx buffers
2803  *  @sp: device private variable.
2804  *  Description:
2805  *  This function will free all Rx buffers allocated by host.
2806  *  Return Value:
2807  *  NONE.
2808  */
2809
2810 static void free_rx_buffers(struct s2io_nic *sp)
2811 {
2812         struct net_device *dev = sp->dev;
2813         int i, blk = 0, buf_cnt = 0;
2814         struct mac_info *mac_control;
2815         struct config_param *config;
2816
2817         mac_control = &sp->mac_control;
2818         config = &sp->config;
2819
2820         for (i = 0; i < config->rx_ring_num; i++) {
2821                 struct ring_info *ring = &mac_control->rings[i];
2822
2823                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2824                         free_rxd_blk(sp, i, blk);
2825
2826                 ring->rx_curr_put_info.block_index = 0;
2827                 ring->rx_curr_get_info.block_index = 0;
2828                 ring->rx_curr_put_info.offset = 0;
2829                 ring->rx_curr_get_info.offset = 0;
2830                 ring->rx_bufs_left = 0;
2831                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2832                           dev->name, buf_cnt, i);
2833         }
2834 }
2835
2836 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2837 {
2838         if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2839                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2840                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2841         }
2842         return 0;
2843 }
2844
2845 /**
2846  * s2io_poll - Rx interrupt handler for NAPI support
2847  * @napi : pointer to the napi structure.
2848  * @budget : The number of packets that were budgeted to be processed
2849  * during  one pass through the 'Poll" function.
2850  * Description:
2851  * Comes into picture only if NAPI support has been incorporated. It does
2852  * the same thing that rx_intr_handler does, but not in a interrupt context
2853  * also It will process only a given number of packets.
2854  * Return value:
2855  * 0 on success and 1 if there are No Rx packets to be processed.
2856  */
2857
2858 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2859 {
2860         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2861         struct net_device *dev = ring->dev;
2862         struct config_param *config;
2863         struct mac_info *mac_control;
2864         int pkts_processed = 0;
2865         u8 __iomem *addr = NULL;
2866         u8 val8 = 0;
2867         struct s2io_nic *nic = netdev_priv(dev);
2868         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2869         int budget_org = budget;
2870
2871         config = &nic->config;
2872         mac_control = &nic->mac_control;
2873
2874         if (unlikely(!is_s2io_card_up(nic)))
2875                 return 0;
2876
2877         pkts_processed = rx_intr_handler(ring, budget);
2878         s2io_chk_rx_buffers(nic, ring);
2879
2880         if (pkts_processed < budget_org) {
2881                 napi_complete(napi);
2882                 /*Re Enable MSI-Rx Vector*/
2883                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2884                 addr += 7 - ring->ring_no;
2885                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2886                 writeb(val8, addr);
2887                 val8 = readb(addr);
2888         }
2889         return pkts_processed;
2890 }
2891
/*
 * s2io_poll_inta - NAPI poll handler used in INTA (legacy interrupt) mode.
 * @napi: napi structure embedded in the per-adapter s2io_nic.
 * @budget: maximum number of Rx packets to process during this pass.
 * Description:
 * Walks every configured Rx ring, sharing the budget between them, and
 * re-enables Rx interrupts once less than the full budget was consumed.
 * Return value: number of packets processed.
 */
2892 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2893 {
2894         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2895         struct config_param *config;
2896         struct mac_info *mac_control;
2897         int pkts_processed = 0;
2898         int ring_pkts_processed, i;
2899         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2900         int budget_org = budget;
2901
2902         config = &nic->config;
2903         mac_control = &nic->mac_control;
2904
             /* Nothing to do while the adapter is down or going down. */
2905         if (unlikely(!is_s2io_card_up(nic)))
2906                 return 0;
2907
2908         for (i = 0; i < config->rx_ring_num; i++) {
2909                 struct ring_info *ring = &mac_control->rings[i];
2910                 ring_pkts_processed = rx_intr_handler(ring, budget);
2911                 s2io_chk_rx_buffers(nic, ring);
2912                 pkts_processed += ring_pkts_processed;
2913                 budget -= ring_pkts_processed; /* remaining budget for later rings */
2914                 if (budget <= 0)
2915                         break;
2916         }
2917         if (pkts_processed < budget_org) {
2918                 napi_complete(napi);
2919                 /* Re enable the Rx interrupts for the ring */
2920                 writeq(0, &bar0->rx_traffic_mask);
2921                 readl(&bar0->rx_traffic_mask); /* flush the posted write */
2922         }
2923         return pkts_processed;
2924 }
2925
2926 #ifdef CONFIG_NET_POLL_CONTROLLER
2927 /**
2928  * s2io_netpoll - netpoll event handler entry point
2929  * @dev : pointer to the device structure.
2930  * Description:
2931  *      This function will be called by upper layer to check for events on the
2932  * interface in situations where interrupts are disabled. It is used for
2933  * specific in-kernel networking tasks, such as remote consoles and kernel
2934  * debugging over the network (example netdump in RedHat).
2935  */
2936 static void s2io_netpoll(struct net_device *dev)
2937 {
2938         struct s2io_nic *nic = netdev_priv(dev);
2939         struct mac_info *mac_control;
2940         struct config_param *config;
2941         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2942         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2943         int i;
2944
             /* Device lost to an EEH/PCI error - nothing we can poll. */
2945         if (pci_channel_offline(nic->pdev))
2946                 return;
2947
2948         disable_irq(dev->irq);
2949
2950         mac_control = &nic->mac_control;
2951         config = &nic->config;
2952
             /* Writing all 1s - presumably acknowledges every pending Rx/Tx
              * traffic interrupt before polling. TODO confirm against the
              * Xframe register spec. */
2953         writeq(val64, &bar0->rx_traffic_int);
2954         writeq(val64, &bar0->tx_traffic_int);
2955
2956         /* we need to free up the transmitted skbufs or else netpoll will
2957          * run out of skbs and will fail and eventually netpoll application such
2958          * as netdump will fail.
2959          */
2960         for (i = 0; i < config->tx_fifo_num; i++)
2961                 tx_intr_handler(&mac_control->fifos[i]);
2962
2963         /* check for received packet and indicate up to network */
2964         for (i = 0; i < config->rx_ring_num; i++) {
2965                 struct ring_info *ring = &mac_control->rings[i];
2966
2967                 rx_intr_handler(ring, 0);
2968         }
2969
             /* Replenish the Rx rings with fresh buffers. */
2970         for (i = 0; i < config->rx_ring_num; i++) {
2971                 struct ring_info *ring = &mac_control->rings[i];
2972
2973                 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2974                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2975                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2976                         break;
2977                 }
2978         }
2979         enable_irq(dev->irq);
2980         return;
2981 }
2982 #endif
2983
2984 /**
2985  *  rx_intr_handler - Rx interrupt handler
2986  *  @ring_data: per ring structure.
2987  *  @budget: budget for napi processing.
2988  *  Description:
2989  *  If the interrupt is because of a received frame or if the
2990  *  receive ring contains fresh as yet un-processed frames,this function is
2991  *  called. It picks out the RxD at which place the last Rx processing had
2992  *  stopped and sends the skb to the OSM's Rx handler and then increments
2993  *  the offset.
2994  *  Return Value:
2995  *  No. of napi packets processed.
2996  */
2997 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2998 {
2999         int get_block, put_block;
3000         struct rx_curr_get_info get_info, put_info;
3001         struct RxD_t *rxdp;
3002         struct sk_buff *skb;
3003         int pkt_cnt = 0, napi_pkts = 0;
3004         int i;
3005         struct RxD1 *rxdp1;
3006         struct RxD3 *rxdp3;
3007
3008         get_info = ring_data->rx_curr_get_info;
3009         get_block = get_info.block_index;
             /* Snapshot the producer (put) side; it may advance concurrently. */
3010         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
3011         put_block = put_info.block_index;
3012         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
3013
3014         while (RXD_IS_UP2DT(rxdp)) {
3015                 /*
3016                  * If your are next to put index then it's
3017                  * FIFO full condition
3018                  */
3019                 if ((get_block == put_block) &&
3020                     (get_info.offset + 1) == put_info.offset) {
3021                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
3022                                   ring_data->dev->name);
3023                         break;
3024                 }
3025                 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
3026                 if (skb == NULL) {
3027                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
3028                                   ring_data->dev->name);
3029                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3030                         return 0;
3031                 }
                     /* Unmap the buffer(s) according to the descriptor mode. */
3032                 if (ring_data->rxd_mode == RXD_MODE_1) {
3033                         rxdp1 = (struct RxD1 *)rxdp;
3034                         pci_unmap_single(ring_data->pdev, (dma_addr_t)
3035                                          rxdp1->Buffer0_ptr,
3036                                          ring_data->mtu +
3037                                          HEADER_ETHERNET_II_802_3_SIZE +
3038                                          HEADER_802_2_SIZE +
3039                                          HEADER_SNAP_SIZE,
3040                                          PCI_DMA_FROMDEVICE);
3041                 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3042                         rxdp3 = (struct RxD3 *)rxdp;
3043                         pci_dma_sync_single_for_cpu(ring_data->pdev,
3044                                                     (dma_addr_t)rxdp3->Buffer0_ptr,
3045                                                     BUF0_LEN,
3046                                                     PCI_DMA_FROMDEVICE);
3047                         pci_unmap_single(ring_data->pdev,
3048                                          (dma_addr_t)rxdp3->Buffer2_ptr,
3049                                          ring_data->mtu + 4,
3050                                          PCI_DMA_FROMDEVICE);
3051                 }
3052                 prefetch(skb->data);
3053                 rx_osm_handler(ring_data, rxdp);
3054                 get_info.offset++;
3055                 ring_data->rx_curr_get_info.offset = get_info.offset;
3056                 rxdp = ring_data->rx_blocks[get_block].
3057                         rxds[get_info.offset].virt_addr;
                     /* End of this block reached - wrap to the next block. */
3058                 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3059                         get_info.offset = 0;
3060                         ring_data->rx_curr_get_info.offset = get_info.offset;
3061                         get_block++;
3062                         if (get_block == ring_data->block_count)
3063                                 get_block = 0;
3064                         ring_data->rx_curr_get_info.block_index = get_block;
3065                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3066                 }
3067
3068                 if (ring_data->nic->config.napi) { /* honour the NAPI budget */
3069                         budget--;
3070                         napi_pkts++;
3071                         if (!budget)
3072                                 break;
3073                 }
3074                 pkt_cnt++;
                     /* Optional upper cap on packets handled per call. */
3075                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3076                         break;
3077         }
3078         if (ring_data->lro) {
3079                 /* Clear all LRO sessions before exiting */
3080                 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
3081                         struct lro *lro = &ring_data->lro0_n[i];
3082                         if (lro->in_use) {
3083                                 update_L3L4_header(ring_data->nic, lro);
3084                                 queue_rx_frame(lro->parent, lro->vlan_tag);
3085                                 clear_lro_session(lro);
3086                         }
3087                 }
3088         }
3089         return napi_pkts;
3090 }
3091
3092 /**
3093  *  tx_intr_handler - Transmit interrupt handler
3094  *  @nic : device private variable
3095  *  Description:
3096  *  If an interrupt was raised to indicate DMA complete of the
3097  *  Tx packet, this function is called. It identifies the last TxD
3098  *  whose buffer was freed and frees all skbs whose data have already
3099  *  DMA'ed into the NICs internal memory.
3100  *  Return Value:
3101  *  NONE
3102  */
3103
3104 static void tx_intr_handler(struct fifo_info *fifo_data)
3105 {
3106         struct s2io_nic *nic = fifo_data->nic;
3107         struct tx_curr_get_info get_info, put_info;
3108         struct sk_buff *skb = NULL;
3109         struct TxD *txdlp;
3110         int pkt_cnt = 0;
3111         unsigned long flags = 0;
3112         u8 err_mask;
3113
             /* Bail out if this fifo is already being serviced elsewhere. */
3114         if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3115                 return;
3116
3117         get_info = fifo_data->tx_curr_get_info;
3118         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3119         txdlp = (struct TxD *)
3120                 fifo_data->list_info[get_info.offset].list_virt_addr;
             /* Reap descriptors the NIC has handed back (ownership bit clear)
              * until we catch up with the producer (put) index. */
3121         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3122                (get_info.offset != put_info.offset) &&
3123                (txdlp->Host_Control)) {
3124                 /* Check for TxD errors */
3125                 if (txdlp->Control_1 & TXD_T_CODE) {
3126                         unsigned long long err;
3127                         err = txdlp->Control_1 & TXD_T_CODE;
3128                         if (err & 0x1) {
3129                                 nic->mac_control.stats_info->sw_stat.
3130                                         parity_err_cnt++;
3131                         }
3132
3133                         /* update t_code statistics */
3134                         err_mask = err >> 48; /* t_code field sits in the top bits of Control_1 */
3135                         switch (err_mask) {
3136                         case 2:
3137                                 nic->mac_control.stats_info->sw_stat.
3138                                         tx_buf_abort_cnt++;
3139                                 break;
3140
3141                         case 3:
3142                                 nic->mac_control.stats_info->sw_stat.
3143                                         tx_desc_abort_cnt++;
3144                                 break;
3145
3146                         case 7:
3147                                 nic->mac_control.stats_info->sw_stat.
3148                                         tx_parity_err_cnt++;
3149                                 break;
3150
3151                         case 10:
3152                                 nic->mac_control.stats_info->sw_stat.
3153                                         tx_link_loss_cnt++;
3154                                 break;
3155
3156                         case 15:
3157                                 nic->mac_control.stats_info->sw_stat.
3158                                         tx_list_proc_err_cnt++;
3159                                 break;
3160                         }
3161                 }
3162
3163                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3164                 if (skb == NULL) {
3165                         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3166                         DBG_PRINT(ERR_DBG, "%s: Null skb ", __func__);
3167                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3168                         return;
3169                 }
3170                 pkt_cnt++;
3171
3172                 /* Updating the statistics block */
3173                 nic->dev->stats.tx_bytes += skb->len;
3174                 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3175                 dev_kfree_skb_irq(skb);
3176
                     /* Advance the consumer index, wrapping at fifo_len + 1. */
3177                 get_info.offset++;
3178                 if (get_info.offset == get_info.fifo_len + 1)
3179                         get_info.offset = 0;
3180                 txdlp = (struct TxD *)
3181                         fifo_data->list_info[get_info.offset].list_virt_addr;
3182                 fifo_data->tx_curr_get_info.offset = get_info.offset;
3183         }
3184
3185         s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3186
3187         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3188 }
3189
3190 /**
3191  *  s2io_mdio_write - Function to write in to MDIO registers
3192  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3193  *  @addr     : address value
3194  *  @value    : data value
3195  *  @dev      : pointer to net_device structure
3196  *  Description:
3197  *  This function is used to write values to the MDIO registers
3198  *  Return value: NONE
3199  */
3200 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3201                             struct net_device *dev)
3202 {
3203         u64 val64;
3204         struct s2io_nic *sp = netdev_priv(dev);
3205         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3206
3207         /* address transaction */
3208         val64 = MDIO_MMD_INDX_ADDR(addr) |
3209                 MDIO_MMD_DEV_ADDR(mmd_type) |
3210                 MDIO_MMS_PRT_ADDR(0x0);
3211         writeq(val64, &bar0->mdio_control);
3212         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3213         writeq(val64, &bar0->mdio_control);
3214         udelay(100);
3215
3216         /* Data transaction */
3217         val64 = MDIO_MMD_INDX_ADDR(addr) |
3218                 MDIO_MMD_DEV_ADDR(mmd_type) |
3219                 MDIO_MMS_PRT_ADDR(0x0) |
3220                 MDIO_MDIO_DATA(value) |
3221                 MDIO_OP(MDIO_OP_WRITE_TRANS);
3222         writeq(val64, &bar0->mdio_control);
3223         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3224         writeq(val64, &bar0->mdio_control);
3225         udelay(100);
3226
             /* Follow up with a read transaction; the result is discarded -
              * presumably this latches/verifies the write. TODO confirm. */
3227         val64 = MDIO_MMD_INDX_ADDR(addr) |
3228                 MDIO_MMD_DEV_ADDR(mmd_type) |
3229                 MDIO_MMS_PRT_ADDR(0x0) |
3230                 MDIO_OP(MDIO_OP_READ_TRANS);
3231         writeq(val64, &bar0->mdio_control);
3232         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3233         writeq(val64, &bar0->mdio_control);
3234         udelay(100);
3235 }
3236
3237 /**
3238  *  s2io_mdio_read - Function to read from the MDIO registers
3239  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3240  *  @addr     : address value
3241  *  @dev      : pointer to net_device structure
3242  *  Description:
3243  *  This function is used to read values from the MDIO registers
3244  *  Return value: the 16-bit register value read, in the low bits of a u64.
3245  */
3246 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3247 {
3248         u64 val64 = 0x0;
3249         u64 rval64 = 0x0;
3250         struct s2io_nic *sp = netdev_priv(dev);
3251         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3252
3253         /* address transaction */
3254         val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3255                          | MDIO_MMD_DEV_ADDR(mmd_type)
3256                          | MDIO_MMS_PRT_ADDR(0x0));
3257         writeq(val64, &bar0->mdio_control);
3258         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3259         writeq(val64, &bar0->mdio_control);
3260         udelay(100);
3261
3262         /* Data transaction */
3263         val64 = MDIO_MMD_INDX_ADDR(addr) |
3264                 MDIO_MMD_DEV_ADDR(mmd_type) |
3265                 MDIO_MMS_PRT_ADDR(0x0) |
3266                 MDIO_OP(MDIO_OP_READ_TRANS);
3267         writeq(val64, &bar0->mdio_control);
3268         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3269         writeq(val64, &bar0->mdio_control);
3270         udelay(100);
3271
3272         /* Read the value from regs */
             /* Data lives in bits 31:16 of mdio_control; mask and shift down. */
3273         rval64 = readq(&bar0->mdio_control);
3274         rval64 = rval64 & 0xFFFF0000;
3275         rval64 = rval64 >> 16;
3276         return rval64;
3277 }
3278
3279 /**
3280  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3281  *  @counter      : couter value to be updated
3282  *  @flag         : flag to indicate the status
3283  *  @type         : counter type
3284  *  Description:
3285  *  This function is to check the status of the xpak counters value
3286  *  NONE
3287  */
3288
3289 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3290                                   u16 flag, u16 type)
3291 {
3292         u64 mask = 0x3;
3293         u64 val64;
3294         int i;
3295         for (i = 0; i < index; i++)
3296                 mask = mask << 0x2;
3297
3298         if (flag > 0) {
3299                 *counter = *counter + 1;
3300                 val64 = *regs_stat & mask;
3301                 val64 = val64 >> (index * 0x2);
3302                 val64 = val64 + 1;
3303                 if (val64 == 3) {
3304                         switch (type) {
3305                         case 1:
3306                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3307                                           "service. Excessive temperatures may "
3308                                           "result in premature transceiver "
3309                                           "failure \n");
3310                                 break;
3311                         case 2:
3312                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3313                                           "service Excessive bias currents may "
3314                                           "indicate imminent laser diode "
3315                                           "failure \n");
3316                                 break;
3317                         case 3:
3318                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3319                                           "service Excessive laser output "
3320                                           "power may saturate far-end "
3321                                           "receiver\n");
3322                                 break;
3323                         default:
3324                                 DBG_PRINT(ERR_DBG,
3325                                           "Incorrect XPAK Alarm type\n");
3326                         }
3327                         val64 = 0x0;
3328                 }
3329                 val64 = val64 << (index * 0x2);
3330                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3331
3332         } else {
3333                 *regs_stat = *regs_stat & (~mask);
3334         }
3335 }
3336
3337 /**
3338  *  s2io_updt_xpak_counter - Function to update the xpak counters
3339  *  @dev         : pointer to net_device struct
3340  *  Description:
3341  *  This function is to update the status of the xpak counters value
3342  *  NONE
3343  */
3344 static void s2io_updt_xpak_counter(struct net_device *dev)
3345 {
3346         u16 flag  = 0x0;
3347         u16 type  = 0x0;
3348         u16 val16 = 0x0;
3349         u64 val64 = 0x0;
3350         u64 addr  = 0x0;
3351
3352         struct s2io_nic *sp = netdev_priv(dev);
3353         struct stat_block *stat_info = sp->mac_control.stats_info;
3354
3355         /* Check the communication with the MDIO slave */
3356         addr = MDIO_CTRL1;
3357         val64 = 0x0;
3358         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
             /* All-ones or all-zeros means the MDIO bus read nothing useful. */
3359         if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3360                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3361                           "Returned %llx\n", (unsigned long long)val64);
3362                 return;
3363         }
3364
3365         /* Check for the expected value of control reg 1 */
3366         if (val64 != MDIO_CTRL1_SPEED10G) {
3367                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3368                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n",
3369                           (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3370                 return;
3371         }
3372
3373         /* Loading the DOM register to MDIO register */
             /* 0xA100 - presumably the XPAK DOM control register; the write
              * triggers a DOM update and the read-back is discarded. TODO
              * confirm against the XPAK MSA documentation. */
3374         addr = 0xA100;
3375         s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3376         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3377
3378         /* Reading the Alarm flags */
3379         addr = 0xA070;
3380         val64 = 0x0;
3381         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3382
             /* High-temperature alarm: 2-bit saturating field at index 0. */
3383         flag = CHECKBIT(val64, 0x7);
3384         type = 1;
3385         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3386                               &stat_info->xpak_stat.xpak_regs_stat,
3387                               0x0, flag, type);
3388
3389         if (CHECKBIT(val64, 0x6))
3390                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3391
             /* High laser-bias-current alarm: field at index 2. */
3392         flag = CHECKBIT(val64, 0x3);
3393         type = 2;
3394         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3395                               &stat_info->xpak_stat.xpak_regs_stat,
3396                               0x2, flag, type);
3397
3398         if (CHECKBIT(val64, 0x2))
3399                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3400
             /* High laser-output-power alarm: field at index 4. */
3401         flag = CHECKBIT(val64, 0x1);
3402         type = 3;
3403         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3404                               &stat_info->xpak_stat.xpak_regs_stat,
3405                               0x4, flag, type);
3406
3407         if (CHECKBIT(val64, 0x0))
3408                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3409
3410         /* Reading the Warning flags */
3411         addr = 0xA074;
3412         val64 = 0x0;
3413         val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3414
3415         if (CHECKBIT(val64, 0x7))
3416                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3417
3418         if (CHECKBIT(val64, 0x6))
3419                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3420
3421         if (CHECKBIT(val64, 0x3))
3422                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3423
3424         if (CHECKBIT(val64, 0x2))
3425                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3426
3427         if (CHECKBIT(val64, 0x1))
3428                 stat_info->xpak_stat.warn_laser_output_power_high++;
3429
3430         if (CHECKBIT(val64, 0x0))
3431                 stat_info->xpak_stat.warn_laser_output_power_low++;
3432 }
3433
3434 /**
3435  *  wait_for_cmd_complete - waits for a command to complete.
3436  *  @sp : private member of the device structure, which is a pointer to the
3437  *  s2io_nic structure.
3438  *  Description: Function that waits for a command to Write into RMAC
3439  *  ADDR DATA registers to be completed and returns either success or
3440  *  error depending on whether the command was complete or not.
3441  *  Return value:
3442  *   SUCCESS on success and FAILURE on failure.
3443  */
3444
3445 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3446                                  int bit_state)
3447 {
3448         int ret = FAILURE, cnt = 0, delay = 1;
3449         u64 val64;
3450
3451         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3452                 return FAILURE;
3453
3454         do {
3455                 val64 = readq(addr);
3456                 if (bit_state == S2IO_BIT_RESET) {
3457                         if (!(val64 & busy_bit)) {
3458                                 ret = SUCCESS;
3459                                 break;
3460                         }
3461                 } else {
3462                         if (!(val64 & busy_bit)) {
3463                                 ret = SUCCESS;
3464                                 break;
3465                         }
3466                 }
3467
3468                 if (in_interrupt())
3469                         mdelay(delay);
3470                 else
3471                         msleep(delay);
3472
3473                 if (++cnt >= 10)
3474                         delay = 50;
3475         } while (cnt < 20);
3476         return ret;
3477 }
3478 /*
3479  * check_pci_device_id - Checks if the device id is supported
3480  * @id : device id
3481  * Description: Function to check if the pci device id is supported by driver.
3482  * Return value: Actual device id if supported else PCI_ANY_ID
3483  */
3484 static u16 check_pci_device_id(u16 id)
3485 {
3486         switch (id) {
3487         case PCI_DEVICE_ID_HERC_WIN:
3488         case PCI_DEVICE_ID_HERC_UNI:
3489                 return XFRAME_II_DEVICE;
3490         case PCI_DEVICE_ID_S2IO_UNI:
3491         case PCI_DEVICE_ID_S2IO_WIN:
3492                 return XFRAME_I_DEVICE;
3493         default:
3494                 return PCI_ANY_ID;
3495         }
3496 }
3497
3498 /**
3499  *  s2io_reset - Resets the card.
3500  *  @sp : private member of the device structure.
3501  *  Description: Function to Reset the card. This function then also
3502  *  restores the previously saved PCI configuration space registers as
3503  *  the card reset also resets the configuration space.
3504  *  Return value:
3505  *  void.
3506  */
3507
3508 static void s2io_reset(struct s2io_nic *sp)
3509 {
3510         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3511         u64 val64;
3512         u16 subid, pci_cmd;
3513         int i;
3514         u16 val16;
3515         unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3516         unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3517
3518         DBG_PRINT(INIT_DBG, "%s - Resetting XFrame card %s\n",
3519                   __func__, sp->dev->name);
3520
3521         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3522         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3523
3524         val64 = SW_RESET_ALL;
3525         writeq(val64, &bar0->sw_reset);
             /* CX4 boards need extra settle time after the soft reset. */
3526         if (strstr(sp->product_name, "CX4"))
3527                 msleep(750);
3528         msleep(250);
             /* Wait until the device id reads back sanely, i.e. PCI config
              * space has recovered from the reset. */
3529         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3530
3531                 /* Restore the PCI state saved during initialization. */
3532                 pci_restore_state(sp->pdev);
3533                 pci_read_config_word(sp->pdev, 0x2, &val16);
3534                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3535                         break;
3536                 msleep(200);
3537         }
3538
3539         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3540                 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3541
3542         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3543
3544         s2io_init_pci(sp);
3545
3546         /* Set swapper to enable I/O register access */
3547         s2io_set_swapper(sp);
3548
3549         /* restore mac_addr entries */
3550         do_s2io_restore_unicast_mc(sp);
3551
3552         /* Restore the MSIX table entries from local variables */
3553         restore_xmsi_data(sp);
3554
3555         /* Clear certain PCI/PCI-X fields after reset */
3556         if (sp->device_type == XFRAME_II_DEVICE) {
3557                 /* Clear "detected parity error" bit */
3558                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3559
3560                 /* Clearing PCIX Ecc status register */
3561                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3562
3563                 /* Clearing PCI_STATUS error reflected here */
3564                 writeq(s2BIT(62), &bar0->txpic_int_reg);
3565         }
3566
3567         /* Reset device statistics maintained by OS */
3568         memset(&sp->stats, 0, sizeof(struct net_device_stats));
3569
3570         up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3571         down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3572         up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3573         down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3574         reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3575         mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3576         mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3577         watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3578         /* save link up/down time/cnt, reset/memory/watchdog cnt */
3579         memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3580         /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3581         sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3582         sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3583         sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3584         sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3585         sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3586         sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3587         sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3588         sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3589
3590         /* SXE-002: Configure link and activity LED to turn it off */
3591         subid = sp->pdev->subsystem_device;
3592         if (((subid & 0xFF) >= 0x07) &&
3593             (sp->device_type == XFRAME_I_DEVICE)) {
3594                 val64 = readq(&bar0->gpio_control);
3595                 val64 |= 0x0000800000000000ULL;
3596                 writeq(val64, &bar0->gpio_control);
3597                 val64 = 0x0411040400000000ULL;
3598                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3599         }
3600
3601         /*
3602          * Clear spurious ECC interrupts that would have occurred on
3603          * XFRAME II cards after reset.
3604          */
3605         if (sp->device_type == XFRAME_II_DEVICE) {
3606                 val64 = readq(&bar0->pcc_err_reg);
3607                 writeq(val64, &bar0->pcc_err_reg); /* write-back clears the latched bits */
3608         }
3609
3610         sp->device_enabled_once = false;
3611 }
3612
3613 /**
3614  *  s2io_set_swapper - to set the swapper controle on the card
3615  *  @sp : private member of the device structure,
3616  *  pointer to the s2io_nic structure.
3617  *  Description: Function to set the swapper control on the card
3618  *  correctly depending on the 'endianness' of the system.
3619  *  Return value:
3620  *  SUCCESS on success and FAILURE on failure.
3621  */
3622
3623 static int s2io_set_swapper(struct s2io_nic *sp)
3624 {
3625         struct net_device *dev = sp->dev;
3626         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3627         u64 val64, valt, valr;
3628
3629         /*
3630          * Set proper endian settings and verify the same by reading
3631          * the PIF Feed-back register.
3632          */
3633
3634         val64 = readq(&bar0->pif_rd_swapper_fb);
3635         if (val64 != 0x0123456789ABCDEFULL) {
3636                 int i = 0;
3637                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3638                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3639                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3640                                 0};                     /* FE=0, SE=0 */
3641
3642                 while (i < 4) {
3643                         writeq(value[i], &bar0->swapper_ctrl);
3644                         val64 = readq(&bar0->pif_rd_swapper_fb);
3645                         if (val64 == 0x0123456789ABCDEFULL)
3646                                 break;
3647                         i++;
3648                 }
3649                 if (i == 4) {
3650                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3651                                   dev->name);
3652                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3653                                   (unsigned long long)val64);
3654                         return FAILURE;
3655                 }
3656                 valr = value[i];
3657         } else {
3658                 valr = readq(&bar0->swapper_ctrl);
3659         }
3660
3661         valt = 0x0123456789ABCDEFULL;
3662         writeq(valt, &bar0->xmsi_address);
3663         val64 = readq(&bar0->xmsi_address);
3664
3665         if (val64 != valt) {
3666                 int i = 0;
3667                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3668                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3669                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3670                                 0};                     /* FE=0, SE=0 */
3671
3672                 while (i < 4) {
3673                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3674                         writeq(valt, &bar0->xmsi_address);
3675                         val64 = readq(&bar0->xmsi_address);
3676                         if (val64 == valt)
3677                                 break;
3678                         i++;
3679                 }
3680                 if (i == 4) {
3681                         unsigned long long x = val64;
3682                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3683                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3684                         return FAILURE;
3685                 }
3686         }
3687         val64 = readq(&bar0->swapper_ctrl);
3688         val64 &= 0xFFFF000000000000ULL;
3689
3690 #ifdef __BIG_ENDIAN
3691         /*
3692          * The device by default set to a big endian format, so a
3693          * big endian driver need not set anything.
3694          */
3695         val64 |= (SWAPPER_CTRL_TXP_FE |
3696                   SWAPPER_CTRL_TXP_SE |
3697                   SWAPPER_CTRL_TXD_R_FE |
3698                   SWAPPER_CTRL_TXD_W_FE |
3699                   SWAPPER_CTRL_TXF_R_FE |
3700                   SWAPPER_CTRL_RXD_R_FE |
3701                   SWAPPER_CTRL_RXD_W_FE |
3702                   SWAPPER_CTRL_RXF_W_FE |
3703                   SWAPPER_CTRL_XMSI_FE |
3704                   SWAPPER_CTRL_STATS_FE |
3705                   SWAPPER_CTRL_STATS_SE);
3706         if (sp->config.intr_type == INTA)
3707                 val64 |= SWAPPER_CTRL_XMSI_SE;
3708         writeq(val64, &bar0->swapper_ctrl);
3709 #else
3710         /*
3711          * Initially we enable all bits to make it accessible by the
3712          * driver, then we selectively enable only those bits that
3713          * we want to set.
3714          */
3715         val64 |= (SWAPPER_CTRL_TXP_FE |
3716                   SWAPPER_CTRL_TXP_SE |
3717                   SWAPPER_CTRL_TXD_R_FE |
3718                   SWAPPER_CTRL_TXD_R_SE |
3719                   SWAPPER_CTRL_TXD_W_FE |
3720                   SWAPPER_CTRL_TXD_W_SE |
3721                   SWAPPER_CTRL_TXF_R_FE |
3722                   SWAPPER_CTRL_RXD_R_FE |
3723                   SWAPPER_CTRL_RXD_R_SE |
3724                   SWAPPER_CTRL_RXD_W_FE |
3725                   SWAPPER_CTRL_RXD_W_SE |
3726                   SWAPPER_CTRL_RXF_W_FE |
3727                   SWAPPER_CTRL_XMSI_FE |
3728                   SWAPPER_CTRL_STATS_FE |
3729                   SWAPPER_CTRL_STATS_SE);
3730         if (sp->config.intr_type == INTA)
3731                 val64 |= SWAPPER_CTRL_XMSI_SE;
3732         writeq(val64, &bar0->swapper_ctrl);
3733 #endif
3734         val64 = readq(&bar0->swapper_ctrl);
3735
3736         /*
3737          * Verifying if endian settings are accurate by reading a
3738          * feedback register.
3739          */
3740         val64 = readq(&bar0->pif_rd_swapper_fb);
3741         if (val64 != 0x0123456789ABCDEFULL) {
3742                 /* Endian settings are incorrect, calls for another dekko. */
3743                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3744                           dev->name);
3745                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3746                           (unsigned long long)val64);
3747                 return FAILURE;
3748         }
3749
3750         return SUCCESS;
3751 }
3752
3753 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3754 {
3755         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3756         u64 val64;
3757         int ret = 0, cnt = 0;
3758
3759         do {
3760                 val64 = readq(&bar0->xmsi_access);
3761                 if (!(val64 & s2BIT(15)))
3762                         break;
3763                 mdelay(1);
3764                 cnt++;
3765         } while (cnt < 5);
3766         if (cnt == 5) {
3767                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3768                 ret = 1;
3769         }
3770
3771         return ret;
3772 }
3773
3774 static void restore_xmsi_data(struct s2io_nic *nic)
3775 {
3776         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3777         u64 val64;
3778         int i, msix_index;
3779
3780         if (nic->device_type == XFRAME_I_DEVICE)
3781                 return;
3782
3783         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3784                 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3785                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3786                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3787                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3788                 writeq(val64, &bar0->xmsi_access);
3789                 if (wait_for_msix_trans(nic, msix_index)) {
3790                         DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3791                         continue;
3792                 }
3793         }
3794 }
3795
3796 static void store_xmsi_data(struct s2io_nic *nic)
3797 {
3798         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3799         u64 val64, addr, data;
3800         int i, msix_index;
3801
3802         if (nic->device_type == XFRAME_I_DEVICE)
3803                 return;
3804
3805         /* Store and display */
3806         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3807                 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3808                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3809                 writeq(val64, &bar0->xmsi_access);
3810                 if (wait_for_msix_trans(nic, msix_index)) {
3811                         DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3812                         continue;
3813                 }
3814                 addr = readq(&bar0->xmsi_address);
3815                 data = readq(&bar0->xmsi_data);
3816                 if (addr && data) {
3817                         nic->msix_info[i].addr = addr;
3818                         nic->msix_info[i].data = data;
3819                 }
3820         }
3821 }
3822
/*
 * Allocate the MSI-X vector tables, program Rx ring -> vector steering
 * and enable MSI-X on the adapter.
 * Returns 0 on success, -ENOMEM when allocation or pci_enable_msix()
 * fails (all partially allocated state is released on failure).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;

	/* Vector table handed to pci_enable_msix() */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated += size;

	/* Driver-side per-vector bookkeeping (type, handler argument) */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated += size;

	/* Entry 0 is the alarm vector, serviced with the Tx fifos as arg */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining vectors occupy every 8th hardware slot: (i-1)*8+1 */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupt to its own MSI-X vector */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* read back to flush the posted write */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		DBG_PRINT(ERR_DBG, "s2io: Enabling MSI-X failed\n");
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3903
3904 /* Handle software interrupt used during MSI(X) test */
3905 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3906 {
3907         struct s2io_nic *sp = dev_id;
3908
3909         sp->msi_detected = 1;
3910         wake_up(&sp->msi_wait);
3911
3912         return IRQ_HANDLED;
3913 }
3914
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook a temporary handler on MSI-X vector 1 for the test */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/*
	 * Arm a one-shot scheduled (timer) interrupt routed to MSI
	 * vector 1; the register value is saved so it can be restored
	 * after the test.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 to arrive */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n", sp->dev->name,
			  pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduled-interrupt configuration */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3957
3958 static void remove_msix_isr(struct s2io_nic *sp)
3959 {
3960         int i;
3961         u16 msi_control;
3962
3963         for (i = 0; i < sp->num_entries; i++) {
3964                 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3965                         int vector = sp->entries[i].vector;
3966                         void *arg = sp->s2io_entries[i].arg;
3967                         free_irq(vector, arg);
3968                 }
3969         }
3970
3971         kfree(sp->entries);
3972         kfree(sp->s2io_entries);
3973         sp->entries = NULL;
3974         sp->s2io_entries = NULL;
3975
3976         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3977         msi_control &= 0xFFFE; /* Disable MSI */
3978         pci_write_config_word(sp->pdev, 0x42, msi_control);
3979
3980         pci_disable_msix(sp->pdev);
3981 }
3982
3983 static void remove_inta_isr(struct s2io_nic *sp)
3984 {
3985         struct net_device *dev = sp->dev;
3986
3987         free_irq(sp->pdev->irq, dev);
3988 }
3989
3990 /* ********************************************************* *
3991  * Functions defined below concern the OS part of the driver *
3992  * ********************************************************* */
3993
3994 /**
3995  *  s2io_open - open entry point of the driver
3996  *  @dev : pointer to the device structure.
3997  *  Description:
3998  *  This function is the open entry point of the driver. It mainly calls a
3999  *  function to allocate Rx buffers and inserts them into the buffer
4000  *  descriptors and then enables the Rx part of the NIC.
4001  *  Return value:
4002  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4003  *   file on failure.
4004  */
4005
4006 static int s2io_open(struct net_device *dev)
4007 {
4008         struct s2io_nic *sp = netdev_priv(dev);
4009         int err = 0;
4010
4011         /*
4012          * Make sure you have link off by default every time
4013          * Nic is initialized
4014          */
4015         netif_carrier_off(dev);
4016         sp->last_link_state = 0;
4017
4018         /* Initialize H/W and enable interrupts */
4019         err = s2io_card_up(sp);
4020         if (err) {
4021                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4022                           dev->name);
4023                 goto hw_init_failed;
4024         }
4025
4026         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4027                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4028                 s2io_card_down(sp);
4029                 err = -ENODEV;
4030                 goto hw_init_failed;
4031         }
4032         s2io_start_all_tx_queue(sp);
4033         return 0;
4034
4035 hw_init_failed:
4036         if (sp->config.intr_type == MSI_X) {
4037                 if (sp->entries) {
4038                         kfree(sp->entries);
4039                         sp->mac_control.stats_info->sw_stat.mem_freed
4040                                 += (sp->num_entries * sizeof(struct msix_entry));
4041                 }
4042                 if (sp->s2io_entries) {
4043                         kfree(sp->s2io_entries);
4044                         sp->mac_control.stats_info->sw_stat.mem_freed
4045                                 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4046                 }
4047         }
4048         return err;
4049 }
4050
4051 /**
4052  *  s2io_close -close entry point of the driver
4053  *  @dev : device pointer.
4054  *  Description:
4055  *  This is the stop entry point of the driver. It needs to undo exactly
4056  *  whatever was done by the open entry point,thus it's usually referred to
4057  *  as the close function.Among other things this function mainly stops the
4058  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4059  *  Return value:
4060  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4061  *  file on failure.
4062  */
4063
4064 static int s2io_close(struct net_device *dev)
4065 {
4066         struct s2io_nic *sp = netdev_priv(dev);
4067         struct config_param *config = &sp->config;
4068         u64 tmp64;
4069         int offset;
4070
4071         /* Return if the device is already closed               *
4072          *  Can happen when s2io_card_up failed in change_mtu    *
4073          */
4074         if (!is_s2io_card_up(sp))
4075                 return 0;
4076
4077         s2io_stop_all_tx_queue(sp);
4078         /* delete all populated mac entries */
4079         for (offset = 1; offset < config->max_mc_addr; offset++) {
4080                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4081                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4082                         do_s2io_delete_unicast_mc(sp, tmp64);
4083         }
4084
4085         s2io_card_down(sp);
4086
4087         return 0;
4088 }
4089
4090 /**
4091  *  s2io_xmit - Tx entry point of te driver
4092  *  @skb : the socket buffer containing the Tx data.
4093  *  @dev : device pointer.
4094  *  Description :
4095  *  This function is the Tx entry point of the driver. S2IO NIC supports
4096  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4097  *  NOTE: when device cant queue the pkt,just the trans_start variable will
4098  *  not be upadted.
4099  *  Return value:
4100  *  0 on success & 1 on failure.
4101  */
4102
4103 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4104 {
4105         struct s2io_nic *sp = netdev_priv(dev);
4106         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4107         register u64 val64;
4108         struct TxD *txdp;
4109         struct TxFIFO_element __iomem *tx_fifo;
4110         unsigned long flags = 0;
4111         u16 vlan_tag = 0;
4112         struct fifo_info *fifo = NULL;
4113         struct mac_info *mac_control;
4114         struct config_param *config;
4115         int do_spin_lock = 1;
4116         int offload_type;
4117         int enable_per_list_interrupt = 0;
4118         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4119
4120         mac_control = &sp->mac_control;
4121         config = &sp->config;
4122
4123         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4124
4125         if (unlikely(skb->len <= 0)) {
4126                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4127                 dev_kfree_skb_any(skb);
4128                 return NETDEV_TX_OK;
4129         }
4130
4131         if (!is_s2io_card_up(sp)) {
4132                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4133                           dev->name);
4134                 dev_kfree_skb(skb);
4135                 return NETDEV_TX_OK;
4136         }
4137
4138         queue = 0;
4139         if (sp->vlgrp && vlan_tx_tag_present(skb))
4140                 vlan_tag = vlan_tx_tag_get(skb);
4141         if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4142                 if (skb->protocol == htons(ETH_P_IP)) {
4143                         struct iphdr *ip;
4144                         struct tcphdr *th;
4145                         ip = ip_hdr(skb);
4146
4147                         if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4148                                 th = (struct tcphdr *)(((unsigned char *)ip) +
4149                                                        ip->ihl*4);
4150
4151                                 if (ip->protocol == IPPROTO_TCP) {
4152                                         queue_len = sp->total_tcp_fifos;
4153                                         queue = (ntohs(th->source) +
4154                                                  ntohs(th->dest)) &
4155                                                 sp->fifo_selector[queue_len - 1];
4156                                         if (queue >= queue_len)
4157                                                 queue = queue_len - 1;
4158                                 } else if (ip->protocol == IPPROTO_UDP) {
4159                                         queue_len = sp->total_udp_fifos;
4160                                         queue = (ntohs(th->source) +
4161                                                  ntohs(th->dest)) &
4162                                                 sp->fifo_selector[queue_len - 1];
4163                                         if (queue >= queue_len)
4164                                                 queue = queue_len - 1;
4165                                         queue += sp->udp_fifo_idx;
4166                                         if (skb->len > 1024)
4167                                                 enable_per_list_interrupt = 1;
4168                                         do_spin_lock = 0;
4169                                 }
4170                         }
4171                 }
4172         } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4173                 /* get fifo number based on skb->priority value */
4174                 queue = config->fifo_mapping
4175                         [skb->priority & (MAX_TX_FIFOS - 1)];
4176         fifo = &mac_control->fifos[queue];
4177
4178         if (do_spin_lock)
4179                 spin_lock_irqsave(&fifo->tx_lock, flags);
4180         else {
4181                 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4182                         return NETDEV_TX_LOCKED;
4183         }
4184
4185         if (sp->config.multiq) {
4186                 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4187                         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4188                         return NETDEV_TX_BUSY;
4189                 }
4190         } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4191                 if (netif_queue_stopped(dev)) {
4192                         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4193                         return NETDEV_TX_BUSY;
4194                 }
4195         }
4196
4197         put_off = (u16)fifo->tx_curr_put_info.offset;
4198         get_off = (u16)fifo->tx_curr_get_info.offset;
4199         txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
4200
4201         queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4202         /* Avoid "put" pointer going beyond "get" pointer */
4203         if (txdp->Host_Control ||
4204             ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4205                 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4206                 s2io_stop_tx_queue(sp, fifo->fifo_no);
4207                 dev_kfree_skb(skb);
4208                 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4209                 return NETDEV_TX_OK;
4210         }
4211
4212         offload_type = s2io_offload_type(skb);
4213         if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4214                 txdp->Control_1 |= TXD_TCP_LSO_EN;
4215                 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4216         }
4217         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4218                 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4219                                     TXD_TX_CKO_TCP_EN |
4220                                     TXD_TX_CKO_UDP_EN);
4221         }
4222         txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4223         txdp->Control_1 |= TXD_LIST_OWN_XENA;
4224         txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4225         if (enable_per_list_interrupt)
4226                 if (put_off & (queue_len >> 5))
4227                         txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4228         if (vlan_tag) {
4229                 txdp->Control_2 |= TXD_VLAN_ENABLE;
4230                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4231         }
4232
4233         frg_len = skb->len - skb->data_len;
4234         if (offload_type == SKB_GSO_UDP) {
4235                 int ufo_size;
4236
4237                 ufo_size = s2io_udp_mss(skb);
4238                 ufo_size &= ~7;
4239                 txdp->Control_1 |= TXD_UFO_EN;
4240                 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4241                 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4242 #ifdef __BIG_ENDIAN
4243                 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4244                 fifo->ufo_in_band_v[put_off] =
4245                         (__force u64)skb_shinfo(skb)->ip6_frag_id;
4246 #else
4247                 fifo->ufo_in_band_v[put_off] =
4248                         (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4249 #endif
4250                 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4251                 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4252                                                       fifo->ufo_in_band_v,
4253                                                       sizeof(u64),
4254                                                       PCI_DMA_TODEVICE);
4255                 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4256                         goto pci_map_failed;
4257                 txdp++;
4258         }
4259
4260         txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4261                                               frg_len, PCI_DMA_TODEVICE);
4262         if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4263                 goto pci_map_failed;
4264
4265         txdp->Host_Control = (unsigned long)skb;
4266         txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4267         if (offload_type == SKB_GSO_UDP)
4268                 txdp->Control_1 |= TXD_UFO_EN;
4269
4270         frg_cnt = skb_shinfo(skb)->nr_frags;
4271         /* For fragmented SKB. */
4272         for (i = 0; i < frg_cnt; i++) {
4273                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4274                 /* A '0' length fragment will be ignored */
4275                 if (!frag->size)
4276                         continue;
4277                 txdp++;
4278                 txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
4279                                                          frag->page_offset,
4280                                                          frag->size,
4281                                                          PCI_DMA_TODEVICE);
4282                 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4283                 if (offload_type == SKB_GSO_UDP)
4284                         txdp->Control_1 |= TXD_UFO_EN;
4285         }
4286         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4287
4288         if (offload_type == SKB_GSO_UDP)
4289                 frg_cnt++; /* as Txd0 was used for inband header */
4290
4291         tx_fifo = mac_control->tx_FIFO_start[queue];
4292         val64 = fifo->list_info[put_off].list_phy_addr;
4293         writeq(val64, &tx_fifo->TxDL_Pointer);
4294
4295         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4296                  TX_FIFO_LAST_LIST);
4297         if (offload_type)
4298                 val64 |= TX_FIFO_SPECIAL_FUNC;
4299
4300         writeq(val64, &tx_fifo->List_Control);
4301
4302         mmiowb();
4303
4304         put_off++;
4305         if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4306                 put_off = 0;
4307         fifo->tx_curr_put_info.offset = put_off;
4308
4309         /* Avoid "put" pointer going beyond "get" pointer */
4310         if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4311                 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4312                 DBG_PRINT(TX_DBG,
4313                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4314                           put_off, get_off);
4315                 s2io_stop_tx_queue(sp, fifo->fifo_no);
4316         }
4317         mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4318         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4319
4320         if (sp->config.intr_type == MSI_X)
4321                 tx_intr_handler(fifo);
4322
4323         return NETDEV_TX_OK;
4324 pci_map_failed:
4325         stats->pci_map_fail_cnt++;
4326         s2io_stop_tx_queue(sp, fifo->fifo_no);
4327         stats->mem_freed += skb->truesize;
4328         dev_kfree_skb(skb);
4329         spin_unlock_irqrestore(&fifo->tx_lock, flags);
4330         return NETDEV_TX_OK;
4331 }
4332
4333 static void
4334 s2io_alarm_handle(unsigned long data)
4335 {
4336         struct s2io_nic *sp = (struct s2io_nic *)data;
4337         struct net_device *dev = sp->dev;
4338
4339         s2io_handle_errors(dev);
4340         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4341 }
4342
4343 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4344 {
4345         struct ring_info *ring = (struct ring_info *)dev_id;
4346         struct s2io_nic *sp = ring->nic;
4347         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4348
4349         if (unlikely(!is_s2io_card_up(sp)))
4350                 return IRQ_HANDLED;
4351
4352         if (sp->config.napi) {
4353                 u8 __iomem *addr = NULL;
4354                 u8 val8 = 0;
4355
4356                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4357                 addr += (7 - ring->ring_no);
4358                 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4359                 writeb(val8, addr);
4360                 val8 = readb(addr);
4361                 napi_schedule(&ring->napi);
4362         } else {
4363                 rx_intr_handler(ring, 0);
4364                 s2io_chk_rx_buffers(sp, ring);
4365         }
4366
4367         return IRQ_HANDLED;
4368 }
4369
/*
 * MSI-X alarm/Tx vector handler: services TXPIC and Tx traffic
 * interrupts for all fifos.  All interrupts are masked while the
 * handler runs and the driver's mask is restored on exit.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* mask everything while we service Tx interrupts */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* restore the driver's interrupt mask and flush */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4406
/*
 * s2io_txpic_intr_handle - service Tx PIC (GPIO/link) interrupt causes.
 * @sp: device private structure.
 *
 * Handles GPIO-signalled link up/down transitions: updates the adapter
 * control/LED state, reports the link change, and flips the GPIO interrupt
 * masks so that only the opposite transition is armed next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both transitions so the next one re-fires */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Read back to flush the posted mask writes; value is discarded */
	val64 = readq(&bar0->gpio_int_mask);
}
4466
4467 /**
4468  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4469  *  @value: alarm bits
4470  *  @addr: address value
4471  *  @cnt: counter variable
4472  *  Description: Check for alarm and increment the counter
4473  *  Return Value:
4474  *  1 - if alarm bit set
4475  *  0 - if alarm bit is not set
4476  */
4477 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4478                                  unsigned long long *cnt)
4479 {
4480         u64 val64;
4481         val64 = readq(addr);
4482         if (val64 & value) {
4483                 writeq(val64, addr);
4484                 (*cnt)++;
4485                 return 1;
4486         }
4487         return 0;
4488
4489 }
4490
/**
 *  s2io_handle_errors - Xframe error indication handler
 *  @dev_id: opaque pointer to the net_device of this adapter
 *  Description: Handle alarms such as loss of link, single or
 *  double ECC errors, critical and serious errors.  Fatal alarms
 *  (do_s2io_chk_alarm_bit() returning 1) schedule a soft reset of
 *  the adapter; recoverable ones only bump their statistics counter.
 *  Return Value:
 *  NONE
 */
static void s2io_handle_errors(void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0, val64 = 0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down or has fallen off the bus */
	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	memset(&sw_stat->ring_full_cnt, 0,
	       sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if (stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write-back clears the latched cause bits */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				  &sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Each 64-bit bump counter packs four 16-bit ring counts */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
					  PFC_PCIX_ERR,
					  &bar0->pfc_err_reg,
					  &sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
				      &bar0->pfc_err_reg,
				      &sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
					  TDA_SM0_ERR_ALARM |
					  TDA_SM1_ERR_ALARM,
					  &bar0->tda_err_reg,
					  &sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				      &bar0->tda_err_reg,
				      &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
					  PCC_N_SERR | PCC_6_COF_OV_ERR |
					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
					  PCC_TXB_ECC_DB_ERR,
					  &bar0->pcc_err_reg,
					  &sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				      &bar0->pcc_err_reg,
				      &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
					  &bar0->tti_err_reg,
					  &sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				      &bar0->tti_err_reg,
				      &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
					  &bar0->lso_err_reg,
					  &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				      &bar0->lso_err_reg,
				      &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
					  &bar0->tpa_err_reg,
					  &sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
				      &bar0->tpa_err_reg,
				      &sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
					  &bar0->sm_err_reg,
					  &sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
					  &bar0->mac_tmac_err_reg,
					  &sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				      TMAC_DESC_ECC_SG_ERR |
				      TMAC_DESC_ECC_DB_ERR,
				      &bar0->mac_tmac_err_reg,
				      &sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
					  &bar0->xgxs_txgxs_err_reg,
					  &sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				      &bar0->xgxs_txgxs_err_reg,
				      &sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
					  RC_FTC_ECC_DB_ERR |
					  RC_PRCn_SM_ERR_ALARM |
					  RC_FTC_SM_ERR_ALARM,
					  &bar0->rc_err_reg,
					  &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
				      RC_FTC_ECC_SG_ERR |
				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				      &sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
					  PRC_PCI_AB_WR_Rn |
					  PRC_PCI_AB_F_WR_Rn,
					  &bar0->prc_pcix_err_reg,
					  &sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
				      PRC_PCI_DP_WR_Rn |
				      PRC_PCI_DP_F_WR_Rn,
				      &bar0->prc_pcix_err_reg,
				      &sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
					  &bar0->rpa_err_reg,
					  &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				      &bar0->rpa_err_reg,
				      &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
					  RDA_FRM_ECC_DB_N_AERR |
					  RDA_SM1_ERR_ALARM |
					  RDA_SM0_ERR_ALARM |
					  RDA_RXD_ECC_DB_SERR,
					  &bar0->rda_err_reg,
					  &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
				      RDA_FRM_ECC_SG_ERR |
				      RDA_MISC_ERR |
				      RDA_PCIX_ERR,
				      &bar0->rda_err_reg,
				      &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
					  &bar0->rti_err_reg,
					  &sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				      &bar0->rti_err_reg,
				      &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
					  &bar0->mac_rmac_err_reg,
					  &sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
				      RMAC_SINGLE_ECC_ERR |
				      RMAC_DOUBLE_ECC_ERR,
				      &bar0->mac_rmac_err_reg,
				      &sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
					  &bar0->xgxs_rxgxs_err_reg,
					  &sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if (val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
					  &bar0->mc_err_reg,
					  &sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal alarm: stop Tx and let the reset worker recover the NIC */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4788
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones readback means the adapter is inaccessible */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupts while we service the causes */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Rx is deferred to the NAPI poll routine;
				 * mask and clear Rx traffic interrupts here */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * gets cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the saved mask; the readl flushes the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4897
4898 /**
4899  * s2io_updt_stats -
4900  */
4901 static void s2io_updt_stats(struct s2io_nic *sp)
4902 {
4903         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4904         u64 val64;
4905         int cnt = 0;
4906
4907         if (is_s2io_card_up(sp)) {
4908                 /* Apprx 30us on a 133 MHz bus */
4909                 val64 = SET_UPDT_CLICKS(10) |
4910                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4911                 writeq(val64, &bar0->stat_cfg);
4912                 do {
4913                         udelay(100);
4914                         val64 = readq(&bar0->stat_cfg);
4915                         if (!(val64 & s2BIT(0)))
4916                                 break;
4917                         cnt++;
4918                         if (cnt == 5)
4919                                 break; /* Updt failed */
4920                 } while (1);
4921         }
4922 }
4923
4924 /**
4925  *  s2io_get_stats - Updates the device statistics structure.
4926  *  @dev : pointer to the device structure.
4927  *  Description:
4928  *  This function updates the device statistics structure in the s2io_nic
4929  *  structure and returns a pointer to the same.
4930  *  Return value:
4931  *  pointer to the updated net_device_stats structure.
4932  */
4933
4934 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4935 {
4936         struct s2io_nic *sp = netdev_priv(dev);
4937         struct mac_info *mac_control;
4938         struct config_param *config;
4939         int i;
4940
4941
4942         mac_control = &sp->mac_control;
4943         config = &sp->config;
4944
4945         /* Configure Stats for immediate updt */
4946         s2io_updt_stats(sp);
4947
4948         /* Using sp->stats as a staging area, because reset (due to mtu
4949            change, for example) will clear some hardware counters */
4950         dev->stats.tx_packets +=
4951                 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4952                 sp->stats.tx_packets;
4953         sp->stats.tx_packets =
4954                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4955         dev->stats.tx_errors +=
4956                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4957                 sp->stats.tx_errors;
4958         sp->stats.tx_errors =
4959                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4960         dev->stats.rx_errors +=
4961                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4962                 sp->stats.rx_errors;
4963         sp->stats.rx_errors =
4964                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4965         dev->stats.multicast =
4966                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4967                 sp->stats.multicast;
4968         sp->stats.multicast =
4969                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4970         dev->stats.rx_length_errors =
4971                 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4972                 sp->stats.rx_length_errors;
4973         sp->stats.rx_length_errors =
4974                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4975
4976         /* collect per-ring rx_packets and rx_bytes */
4977         dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4978         for (i = 0; i < config->rx_ring_num; i++) {
4979                 struct ring_info *ring = &mac_control->rings[i];
4980
4981                 dev->stats.rx_packets += ring->rx_packets;
4982                 dev->stats.rx_bytes += ring->rx_bytes;
4983         }
4984
4985         return &dev->stats;
4986 }
4987
4988 /**
4989  *  s2io_set_multicast - entry point for multicast address enable/disable.
4990  *  @dev : pointer to the device structure
4991  *  Description:
4992  *  This function is a driver entry point which gets called by the kernel
4993  *  whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
4995  *  determine, if multicast address must be enabled or if promiscuous mode
4996  *  is to be disabled etc.
4997  *  Return value:
4998  *  void.
4999  */
5000
5001 static void s2io_set_multicast(struct net_device *dev)
5002 {
5003         int i, j, prev_cnt;
5004         struct dev_mc_list *mclist;
5005         struct s2io_nic *sp = netdev_priv(dev);
5006         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5007         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
5008                 0xfeffffffffffULL;
5009         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
5010         void __iomem *add;
5011         struct config_param *config = &sp->config;
5012
5013         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5014                 /*  Enable all Multicast addresses */
5015                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5016                        &bar0->rmac_addr_data0_mem);
5017                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5018                        &bar0->rmac_addr_data1_mem);
5019                 val64 = RMAC_ADDR_CMD_MEM_WE |
5020                         RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5021                         RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5022                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5023                 /* Wait till command completes */
5024                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5025                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5026                                       S2IO_BIT_RESET);
5027
5028                 sp->m_cast_flg = 1;
5029                 sp->all_multi_pos = config->max_mc_addr - 1;
5030         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5031                 /*  Disable all Multicast addresses */
5032                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5033                        &bar0->rmac_addr_data0_mem);
5034                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5035                        &bar0->rmac_addr_data1_mem);
5036                 val64 = RMAC_ADDR_CMD_MEM_WE |
5037                         RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5038                         RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5039                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5040                 /* Wait till command completes */
5041                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5042                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5043                                       S2IO_BIT_RESET);
5044
5045                 sp->m_cast_flg = 0;
5046                 sp->all_multi_pos = 0;
5047         }
5048
5049         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5050                 /*  Put the NIC into promiscuous mode */
5051                 add = &bar0->mac_cfg;
5052                 val64 = readq(&bar0->mac_cfg);
5053                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5054
5055                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5056                 writel((u32)val64, add);
5057                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5058                 writel((u32) (val64 >> 32), (add + 4));
5059
5060                 if (vlan_tag_strip != 1) {
5061                         val64 = readq(&bar0->rx_pa_cfg);
5062                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5063                         writeq(val64, &bar0->rx_pa_cfg);
5064                         sp->vlan_strip_flag = 0;
5065                 }
5066
5067                 val64 = readq(&bar0->mac_cfg);
5068                 sp->promisc_flg = 1;
5069                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5070                           dev->name);
5071         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5072                 /*  Remove the NIC from promiscuous mode */
5073                 add = &bar0->mac_cfg;
5074                 val64 = readq(&bar0->mac_cfg);
5075                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5076
5077                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5078                 writel((u32)val64, add);
5079                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5080                 writel((u32) (val64 >> 32), (add + 4));
5081
5082                 if (vlan_tag_strip != 0) {
5083                         val64 = readq(&bar0->rx_pa_cfg);
5084                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5085                         writeq(val64, &bar0->rx_pa_cfg);
5086                         sp->vlan_strip_flag = 1;
5087                 }
5088
5089                 val64 = readq(&bar0->mac_cfg);
5090                 sp->promisc_flg = 0;
5091                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5092                           dev->name);
5093         }
5094
5095         /*  Update individual M_CAST address list */
5096         if ((!sp->m_cast_flg) && dev->mc_count) {
5097                 if (dev->mc_count >
5098                     (config->max_mc_addr - config->max_mac_addr)) {
5099                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5100                                   dev->name);
5101                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5102                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5103                         return;
5104                 }
5105
5106                 prev_cnt = sp->mc_addr_count;
5107                 sp->mc_addr_count = dev->mc_count;
5108
5109                 /* Clear out the previous list of Mc in the H/W. */
5110                 for (i = 0; i < prev_cnt; i++) {
5111                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5112                                &bar0->rmac_addr_data0_mem);
5113                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5114                                &bar0->rmac_addr_data1_mem);
5115                         val64 = RMAC_ADDR_CMD_MEM_WE |
5116                                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5117                                 RMAC_ADDR_CMD_MEM_OFFSET
5118                                 (config->mc_start_offset + i);
5119                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5120
5121                         /* Wait for command completes */
5122                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5123                                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5124                                                   S2IO_BIT_RESET)) {
5125                                 DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name);
5126                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5127                                 return;
5128                         }
5129                 }
5130
5131                 /* Create the new Rx filter list and update the same in H/W. */
5132                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5133                      i++, mclist = mclist->next) {
5134                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5135                                ETH_ALEN);
5136                         mac_addr = 0;
5137                         for (j = 0; j < ETH_ALEN; j++) {
5138                                 mac_addr |= mclist->dmi_addr[j];
5139                                 mac_addr <<= 8;
5140                         }
5141                         mac_addr >>= 8;
5142                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5143                                &bar0->rmac_addr_data0_mem);
5144                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5145                                &bar0->rmac_addr_data1_mem);
5146                         val64 = RMAC_ADDR_CMD_MEM_WE |
5147                                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5148                                 RMAC_ADDR_CMD_MEM_OFFSET
5149                                 (i + config->mc_start_offset);
5150                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5151
5152                         /* Wait for command completes */
5153                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5154                                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5155                                                   S2IO_BIT_RESET)) {
5156                                 DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name);
5157                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5158                                 return;
5159                         }
5160                 }
5161         }
5162 }
5163
5164 /* read from CAM unicast & multicast addresses and store it in
5165  * def_mac_addr structure
5166  */
5167 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5168 {
5169         int offset;
5170         u64 mac_addr = 0x0;
5171         struct config_param *config = &sp->config;
5172
5173         /* store unicast & multicast mac addresses */
5174         for (offset = 0; offset < config->max_mc_addr; offset++) {
5175                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5176                 /* if read fails disable the entry */
5177                 if (mac_addr == FAILURE)
5178                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5179                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5180         }
5181 }
5182
5183 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5184 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5185 {
5186         int offset;
5187         struct config_param *config = &sp->config;
5188         /* restore unicast mac address */
5189         for (offset = 0; offset < config->max_mac_addr; offset++)
5190                 do_s2io_prog_unicast(sp->dev,
5191                                      sp->def_mac_addr[offset].mac_addr);
5192
5193         /* restore multicast mac address */
5194         for (offset = config->mc_start_offset;
5195              offset < config->max_mc_addr; offset++)
5196                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5197 }
5198
5199 /* add a multicast MAC address to CAM */
5200 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5201 {
5202         int i;
5203         u64 mac_addr = 0;
5204         struct config_param *config = &sp->config;
5205
5206         for (i = 0; i < ETH_ALEN; i++) {
5207                 mac_addr <<= 8;
5208                 mac_addr |= addr[i];
5209         }
5210         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5211                 return SUCCESS;
5212
5213         /* check if the multicast mac already preset in CAM */
5214         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5215                 u64 tmp64;
5216                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5217                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5218                         break;
5219
5220                 if (tmp64 == mac_addr)
5221                         return SUCCESS;
5222         }
5223         if (i == config->max_mc_addr) {
5224                 DBG_PRINT(ERR_DBG,
5225                           "CAM full no space left for multicast MAC\n");
5226                 return FAILURE;
5227         }
5228         /* Update the internal structure with this new mac address */
5229         do_s2io_copy_mac_addr(sp, i, mac_addr);
5230
5231         return do_s2io_add_mac(sp, mac_addr, i);
5232 }
5233
5234 /* add MAC address to CAM */
5235 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5236 {
5237         u64 val64;
5238         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5239
5240         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5241                &bar0->rmac_addr_data0_mem);
5242
5243         val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5244                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5245         writeq(val64, &bar0->rmac_addr_cmd_mem);
5246
5247         /* Wait till command completes */
5248         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5249                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5250                                   S2IO_BIT_RESET)) {
5251                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5252                 return FAILURE;
5253         }
5254         return SUCCESS;
5255 }
5256 /* deletes a specified unicast/multicast mac entry from CAM */
5257 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5258 {
5259         int offset;
5260         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5261         struct config_param *config = &sp->config;
5262
5263         for (offset = 1;
5264              offset < config->max_mc_addr; offset++) {
5265                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5266                 if (tmp64 == addr) {
5267                         /* disable the entry by writing  0xffffffffffffULL */
5268                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5269                                 return FAILURE;
5270                         /* store the new mac list from CAM */
5271                         do_s2io_store_unicast_mc(sp);
5272                         return SUCCESS;
5273                 }
5274         }
5275         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5276                   (unsigned long long)addr);
5277         return FAILURE;
5278 }
5279
5280 /* read mac entries from CAM */
5281 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5282 {
5283         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5284         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5285
5286         /* read mac addr */
5287         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5288                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5289         writeq(val64, &bar0->rmac_addr_cmd_mem);
5290
5291         /* Wait till command completes */
5292         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5293                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5294                                   S2IO_BIT_RESET)) {
5295                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5296                 return FAILURE;
5297         }
5298         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5299
5300         return tmp64 >> 16;
5301 }
5302
5303 /**
5304  * s2io_set_mac_addr driver entry point
5305  */
5306
5307 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5308 {
5309         struct sockaddr *addr = p;
5310
5311         if (!is_valid_ether_addr(addr->sa_data))
5312                 return -EINVAL;
5313
5314         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5315
5316         /* store the MAC address in CAM */
5317         return do_s2io_prog_unicast(dev, dev->dev_addr);
5318 }
5319 /**
5320  *  do_s2io_prog_unicast - Programs the Xframe mac address
5321  *  @dev : pointer to the device structure.
5322  *  @addr: a uchar pointer to the new mac address which is to be set.
5323  *  Description : This procedure will program the Xframe to receive
5324  *  frames with new Mac Address
5325  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5326  *  as defined in errno.h file on failure.
5327  */
5328
5329 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5330 {
5331         struct s2io_nic *sp = netdev_priv(dev);
5332         register u64 mac_addr = 0, perm_addr = 0;
5333         int i;
5334         u64 tmp64;
5335         struct config_param *config = &sp->config;
5336
5337         /*
5338          * Set the new MAC address as the new unicast filter and reflect this
5339          * change on the device address registered with the OS. It will be
5340          * at offset 0.
5341          */
5342         for (i = 0; i < ETH_ALEN; i++) {
5343                 mac_addr <<= 8;
5344                 mac_addr |= addr[i];
5345                 perm_addr <<= 8;
5346                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5347         }
5348
5349         /* check if the dev_addr is different than perm_addr */
5350         if (mac_addr == perm_addr)
5351                 return SUCCESS;
5352
5353         /* check if the mac already preset in CAM */
5354         for (i = 1; i < config->max_mac_addr; i++) {
5355                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5356                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5357                         break;
5358
5359                 if (tmp64 == mac_addr) {
5360                         DBG_PRINT(INFO_DBG,
5361                                   "MAC addr:0x%llx already present in CAM\n",
5362                                   (unsigned long long)mac_addr);
5363                         return SUCCESS;
5364                 }
5365         }
5366         if (i == config->max_mac_addr) {
5367                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5368                 return FAILURE;
5369         }
5370         /* Update the internal structure with this new mac address */
5371         do_s2io_copy_mac_addr(sp, i, mac_addr);
5372
5373         return do_s2io_add_mac(sp, mac_addr, i);
5374 }
5375
5376 /**
5377  * s2io_ethtool_sset - Sets different link parameters.
5378  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5379  * @info: pointer to the structure with parameters given by ethtool to set
5380  * link information.
5381  * Description:
5382  * The function sets different link parameters provided by the user onto
5383  * the NIC.
5384  * Return value:
5385  * 0 on success.
5386  */
5387
5388 static int s2io_ethtool_sset(struct net_device *dev,
5389                              struct ethtool_cmd *info)
5390 {
5391         struct s2io_nic *sp = netdev_priv(dev);
5392         if ((info->autoneg == AUTONEG_ENABLE) ||
5393             (info->speed != SPEED_10000) ||
5394             (info->duplex != DUPLEX_FULL))
5395                 return -EINVAL;
5396         else {
5397                 s2io_close(sp->dev);
5398                 s2io_open(sp->dev);
5399         }
5400
5401         return 0;
5402 }
5403
5404 /**
5405  * s2io_ethtol_gset - Return link specific information.
5406  * @sp : private member of the device structure, pointer to the
5407  *      s2io_nic structure.
5408  * @info : pointer to the structure with parameters given by ethtool
5409  * to return link information.
5410  * Description:
5411  * Returns link specific information like speed, duplex etc.. to ethtool.
5412  * Return value :
5413  * return 0 on success.
5414  */
5415
5416 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5417 {
5418         struct s2io_nic *sp = netdev_priv(dev);
5419         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5420         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5421         info->port = PORT_FIBRE;
5422
5423         /* info->transceiver */
5424         info->transceiver = XCVR_EXTERNAL;
5425
5426         if (netif_carrier_ok(sp->dev)) {
5427                 info->speed = 10000;
5428                 info->duplex = DUPLEX_FULL;
5429         } else {
5430                 info->speed = -1;
5431                 info->duplex = -1;
5432         }
5433
5434         info->autoneg = AUTONEG_DISABLE;
5435         return 0;
5436 }
5437
5438 /**
5439  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5440  * @sp : private member of the device structure, which is a pointer to the
5441  * s2io_nic structure.
5442  * @info : pointer to the structure with parameters given by ethtool to
5443  * return driver information.
5444  * Description:
5445  * Returns driver specefic information like name, version etc.. to ethtool.
5446  * Return value:
5447  *  void
5448  */
5449
5450 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5451                                   struct ethtool_drvinfo *info)
5452 {
5453         struct s2io_nic *sp = netdev_priv(dev);
5454
5455         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5456         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5457         strncpy(info->fw_version, "", sizeof(info->fw_version));
5458         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5459         info->regdump_len = XENA_REG_SPACE;
5460         info->eedump_len = XENA_EEPROM_SPACE;
5461 }
5462
5463 /**
5464  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5465  *  @sp: private member of the device structure, which is a pointer to the
5466  *  s2io_nic structure.
5467  *  @regs : pointer to the structure with parameters given by ethtool for
5468  *  dumping the registers.
5469  *  @reg_space: The input argumnet into which all the registers are dumped.
5470  *  Description:
5471  *  Dumps the entire register space of xFrame NIC into the user given
5472  *  buffer area.
5473  * Return value :
5474  * void .
5475  */
5476
5477 static void s2io_ethtool_gregs(struct net_device *dev,
5478                                struct ethtool_regs *regs, void *space)
5479 {
5480         int i;
5481         u64 reg;
5482         u8 *reg_space = (u8 *)space;
5483         struct s2io_nic *sp = netdev_priv(dev);
5484
5485         regs->len = XENA_REG_SPACE;
5486         regs->version = sp->pdev->subsystem_device;
5487
5488         for (i = 0; i < regs->len; i += 8) {
5489                 reg = readq(sp->bar0 + i);
5490                 memcpy((reg_space + i), &reg, 8);
5491         }
5492 }
5493
5494 /**
5495  *  s2io_phy_id  - timer function that alternates adapter LED.
5496  *  @data : address of the private member of the device structure, which
5497  *  is a pointer to the s2io_nic structure, provided as an u32.
5498  * Description: This is actually the timer function that alternates the
5499  * adapter LED bit of the adapter control bit to set/reset every time on
5500  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5501  *  once every second.
5502  */
5503 static void s2io_phy_id(unsigned long data)
5504 {
5505         struct s2io_nic *sp = (struct s2io_nic *)data;
5506         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5507         u64 val64 = 0;
5508         u16 subid;
5509
5510         subid = sp->pdev->subsystem_device;
5511         if ((sp->device_type == XFRAME_II_DEVICE) ||
5512             ((subid & 0xFF) >= 0x07)) {
5513                 val64 = readq(&bar0->gpio_control);
5514                 val64 ^= GPIO_CTRL_GPIO_0;
5515                 writeq(val64, &bar0->gpio_control);
5516         } else {
5517                 val64 = readq(&bar0->adapter_control);
5518                 val64 ^= ADAPTER_LED_ON;
5519                 writeq(val64, &bar0->adapter_control);
5520         }
5521
5522         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5523 }
5524
5525 /**
5526  * s2io_ethtool_idnic - To physically identify the nic on the system.
5527  * @sp : private member of the device structure, which is a pointer to the
5528  * s2io_nic structure.
5529  * @id : pointer to the structure with identification parameters given by
5530  * ethtool.
5531  * Description: Used to physically identify the NIC on the system.
5532  * The Link LED will blink for a time specified by the user for
5533  * identification.
5534  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5535  * identification is possible only if it's link is up.
5536  * Return value:
5537  * int , returns 0 on success
5538  */
5539
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Save the GPIO state so it can be restored after blinking. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Early Xframe I boards can only blink while the adapter is up. */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily set up the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long)sp;
	}
	/* Start blinking immediately; s2io_phy_id() re-arms itself. */
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep for the requested duration (seconds), or the default. */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the saved GPIO state on boards with faulty link LEDs. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5575
5576 static void s2io_ethtool_gringparam(struct net_device *dev,
5577                                     struct ethtool_ringparam *ering)
5578 {
5579         struct s2io_nic *sp = netdev_priv(dev);
5580         int i, tx_desc_count = 0, rx_desc_count = 0;
5581
5582         if (sp->rxd_mode == RXD_MODE_1)
5583                 ering->rx_max_pending = MAX_RX_DESC_1;
5584         else if (sp->rxd_mode == RXD_MODE_3B)
5585                 ering->rx_max_pending = MAX_RX_DESC_2;
5586
5587         ering->tx_max_pending = MAX_TX_DESC;
5588         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5589                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5590
5591         DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds);
5592         ering->tx_pending = tx_desc_count;
5593         rx_desc_count = 0;
5594         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5595                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5596
5597         ering->rx_pending = rx_desc_count;
5598
5599         ering->rx_mini_max_pending = 0;
5600         ering->rx_mini_pending = 0;
5601         if (sp->rxd_mode == RXD_MODE_1)
5602                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5603         else if (sp->rxd_mode == RXD_MODE_3B)
5604                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5605         ering->rx_jumbo_pending = rx_desc_count;
5606 }
5607
5608 /**
5609  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5610  * @sp : private member of the device structure, which is a pointer to the
5611  *      s2io_nic structure.
5612  * @ep : pointer to the structure with pause parameters given by ethtool.
5613  * Description:
5614  * Returns the Pause frame generation and reception capability of the NIC.
5615  * Return value:
5616  *  void
5617  */
5618 static void s2io_ethtool_getpause_data(struct net_device *dev,
5619                                        struct ethtool_pauseparam *ep)
5620 {
5621         u64 val64;
5622         struct s2io_nic *sp = netdev_priv(dev);
5623         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5624
5625         val64 = readq(&bar0->rmac_pause_cfg);
5626         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5627                 ep->tx_pause = true;
5628         if (val64 & RMAC_PAUSE_RX_ENABLE)
5629                 ep->rx_pause = true;
5630         ep->autoneg = false;
5631 }
5632
5633 /**
5634  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5635  * @sp : private member of the device structure, which is a pointer to the
5636  *      s2io_nic structure.
5637  * @ep : pointer to the structure with pause parameters given by ethtool.
5638  * Description:
5639  * It can be used to set or reset Pause frame generation or reception
5640  * support of the NIC.
5641  * Return value:
5642  * int, returns 0 on Success
5643  */
5644
5645 static int s2io_ethtool_setpause_data(struct net_device *dev,
5646                                       struct ethtool_pauseparam *ep)
5647 {
5648         u64 val64;
5649         struct s2io_nic *sp = netdev_priv(dev);
5650         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5651
5652         val64 = readq(&bar0->rmac_pause_cfg);
5653         if (ep->tx_pause)
5654                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5655         else
5656                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5657         if (ep->rx_pause)
5658                 val64 |= RMAC_PAUSE_RX_ENABLE;
5659         else
5660                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5661         writeq(val64, &bar0->rmac_pause_cfg);
5662         return 0;
5663 }
5664
5665 /**
5666  * read_eeprom - reads 4 bytes of data from user given offset.
5667  * @sp : private member of the device structure, which is a pointer to the
5668  *      s2io_nic structure.
5669  * @off : offset at which the data must be written
5670  * @data : Its an output parameter where the data read at the given
5671  *      offset is stored.
5672  * Description:
5673  * Will read 4 bytes of data from the user given offset and return the
5674  * read data.
5675  * NOTE: Will allow to read only part of the EEPROM visible through the
5676  *   I2C bus.
5677  * Return value:
5678  *  -1 on failure and 0 on success.
5679  */
5680
5681 #define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: read 4 bytes via the I2C interface to the EEPROM. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for transfer completion. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: read via the SPI interface instead. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* The request bit must be raised in a second write. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (ret = 1) or DONE (ret = 0). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* 3 bytes were requested */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5733
5734 /**
5735  *  write_eeprom - actually writes the relevant part of the data value.
5736  *  @sp : private member of the device structure, which is a pointer to the
5737  *       s2io_nic structure.
5738  *  @off : offset at which the data must be written
5739  *  @data : The data that is to be written
5740  *  @cnt : Number of bytes of the data that are actually to be written into
5741  *  the Eeprom. (max of 3)
5742  * Description:
5743  *  Actually writes the relevant part of the data value into the Eeprom
5744  *  through the I2C bus.
5745  * Return value:
5746  *  0 on success, -1 on failure.
5747  */
5748
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write 'cnt' bytes via the I2C interface. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart); NACK means failure. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write via the SPI interface instead. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI register. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* The request bit must be raised in a second write. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (ret = 1) or DONE (ret = 0). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5800 static void s2io_vpd_read(struct s2io_nic *nic)
5801 {
5802         u8 *vpd_data;
5803         u8 data;
5804         int i = 0, cnt, fail = 0;
5805         int vpd_addr = 0x80;
5806
5807         if (nic->device_type == XFRAME_II_DEVICE) {
5808                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5809                 vpd_addr = 0x80;
5810         } else {
5811                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5812                 vpd_addr = 0x50;
5813         }
5814         strcpy(nic->serial_num, "NOT AVAILABLE");
5815
5816         vpd_data = kmalloc(256, GFP_KERNEL);
5817         if (!vpd_data) {
5818                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5819                 return;
5820         }
5821         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5822
5823         for (i = 0; i < 256; i += 4) {
5824                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5825                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5826                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5827                 for (cnt = 0; cnt < 5; cnt++) {
5828                         msleep(2);
5829                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5830                         if (data == 0x80)
5831                                 break;
5832                 }
5833                 if (cnt >= 5) {
5834                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5835                         fail = 1;
5836                         break;
5837                 }
5838                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5839                                       (u32 *)&vpd_data[i]);
5840         }
5841
5842         if (!fail) {
5843                 /* read serial number of adapter */
5844                 for (cnt = 0; cnt < 256; cnt++) {
5845                         if ((vpd_data[cnt] == 'S') &&
5846                             (vpd_data[cnt+1] == 'N') &&
5847                             (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5848                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5849                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5850                                        vpd_data[cnt+2]);
5851                                 break;
5852                         }
5853                 }
5854         }
5855
5856         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5857                 memset(nic->product_name, 0, vpd_data[1]);
5858                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5859         }
5860         kfree(vpd_data);
5861         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5862 }
5863
5864 /**
5865  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5867  *  @eeprom : pointer to the user level structure provided by ethtool,
5868  *  containing all relevant information.
5869  *  @data_buf : user defined value to be written into Eeprom.
5870  *  Description: Reads the values stored in the Eeprom at given offset
 *  for a given length. Stores these values into the input argument data
5872  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5873  *  Return value:
5874  *  int  0 on success
5875  */
5876
5877 static int s2io_ethtool_geeprom(struct net_device *dev,
5878                                 struct ethtool_eeprom *eeprom, u8 * data_buf)
5879 {
5880         u32 i, valid;
5881         u64 data;
5882         struct s2io_nic *sp = netdev_priv(dev);
5883
5884         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5885
5886         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5887                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5888
5889         for (i = 0; i < eeprom->len; i += 4) {
5890                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5891                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5892                         return -EFAULT;
5893                 }
5894                 valid = INV(data);
5895                 memcpy((data_buf + i), &valid, 4);
5896         }
5897         return 0;
5898 }
5899
5900 /**
5901  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5902  *  @sp : private member of the device structure, which is a pointer to the
5903  *  s2io_nic structure.
5904  *  @eeprom : pointer to the user level structure provided by ethtool,
5905  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
5907  *  Description:
5908  *  Tries to write the user provided value in the Eeprom, at the offset
5909  *  given by the user.
5910  *  Return value:
5911  *  0 on success, -EFAULT on failure.
5912  */
5913
5914 static int s2io_ethtool_seeprom(struct net_device *dev,
5915                                 struct ethtool_eeprom *eeprom,
5916                                 u8 *data_buf)
5917 {
5918         int len = eeprom->len, cnt = 0;
5919         u64 valid = 0, data;
5920         struct s2io_nic *sp = netdev_priv(dev);
5921
5922         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5923                 DBG_PRINT(ERR_DBG,
5924                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5925                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n", eeprom->magic);
5926                 return -EFAULT;
5927         }
5928
5929         while (len) {
5930                 data = (u32)data_buf[cnt] & 0x000000FF;
5931                 if (data)
5932                         valid = (u32)(data << 24);
5933                 else
5934                         valid = data;
5935
5936                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5937                         DBG_PRINT(ERR_DBG,
5938                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5939                         DBG_PRINT(ERR_DBG,
5940                                   "write into the specified offset\n");
5941                         return -EFAULT;
5942                 }
5943                 cnt++;
5944                 len--;
5945         }
5946
5947         return 0;
5948 }
5949
5950 /**
5951  * s2io_register_test - reads and writes into all clock domains.
5952  * @sp : private member of the device structure, which is a pointer to the
5953  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5956  * Description:
5957  * Read and write into all clock domains. The NIC has 3 clock domains,
5958  * see that registers in all the three regions are accessible.
5959  * Return value:
5960  * 0 on success.
5961  */
5962
5963 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5964 {
5965         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5966         u64 val64 = 0, exp_val;
5967         int fail = 0;
5968
5969         val64 = readq(&bar0->pif_rd_swapper_fb);
5970         if (val64 != 0x123456789abcdefULL) {
5971                 fail = 1;
5972                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5973         }
5974
5975         val64 = readq(&bar0->rmac_pause_cfg);
5976         if (val64 != 0xc000ffff00000000ULL) {
5977                 fail = 1;
5978                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5979         }
5980
5981         val64 = readq(&bar0->rx_queue_cfg);
5982         if (sp->device_type == XFRAME_II_DEVICE)
5983                 exp_val = 0x0404040404040404ULL;
5984         else
5985                 exp_val = 0x0808080808080808ULL;
5986         if (val64 != exp_val) {
5987                 fail = 1;
5988                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5989         }
5990
5991         val64 = readq(&bar0->xgxs_efifo_cfg);
5992         if (val64 != 0x000000001923141EULL) {
5993                 fail = 1;
5994                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5995         }
5996
5997         val64 = 0x5A5A5A5A5A5A5A5AULL;
5998         writeq(val64, &bar0->xmsi_data);
5999         val64 = readq(&bar0->xmsi_data);
6000         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
6001                 fail = 1;
6002                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
6003         }
6004
6005         val64 = 0xA5A5A5A5A5A5A5A5ULL;
6006         writeq(val64, &bar0->xmsi_data);
6007         val64 = readq(&bar0->xmsi_data);
6008         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
6009                 fail = 1;
6010                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
6011         }
6012
6013         *data = fail;
6014         return fail;
6015 }
6016
6017 /**
6018  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
6019  * @sp : private member of the device structure, which is a pointer to the
6020  * s2io_nic structure.
6021  * @data:variable that returns the result of each of the test conducted by
6022  * the driver.
6023  * Description:
6024  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
6025  * register.
6026  * Return value:
6027  * 0 on success.
6028  */
6029
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
        int fail = 0;
        u64 ret_data, org_4F0, org_7F0;
        u8 saved_4F0 = 0, saved_7F0 = 0;
        struct net_device *dev = sp->dev;

        /* Test Write Error at offset 0 */
        /* Note that SPI interface allows write access to all areas
         * of EEPROM. Hence doing all negative testing only for Xframe I.
         */
        /* Negative test: writing a protected offset must be rejected by
         * write_eeprom(); a successful write means protection is broken. */
        if (sp->device_type == XFRAME_I_DEVICE)
                if (!write_eeprom(sp, 0, 0, 3))
                        fail = 1;

        /* Save current values at offsets 0x4F0 and 0x7F0 */
        /* These scratch offsets get overwritten below and are restored
         * at the end of the test if the initial reads succeeded. */
        if (!read_eeprom(sp, 0x4F0, &org_4F0))
                saved_4F0 = 1;
        if (!read_eeprom(sp, 0x7F0, &org_7F0))
                saved_7F0 = 1;

        /* Test Write at offset 4f0 */
        /* Positive test: a 3-byte write to 0x4F0 must succeed and the
         * same value must read back. */
        if (write_eeprom(sp, 0x4F0, 0x012345, 3))
                fail = 1;
        if (read_eeprom(sp, 0x4F0, &ret_data))
                fail = 1;

        if (ret_data != 0x012345) {
                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
                          "Data written %llx Data read %llx\n",
                          dev->name, (unsigned long long)0x12345,
                          (unsigned long long)ret_data);
                fail = 1;
        }

        /* Reset the EEPROM data go FFFF */
        write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

        /* Test Write Request Error at offset 0x7c */
        /* Another negative (expected-failure) write, Xframe I only */
        if (sp->device_type == XFRAME_I_DEVICE)
                if (!write_eeprom(sp, 0x07C, 0, 3))
                        fail = 1;

        /* Test Write Request at offset 0x7f0 */
        /* Second positive write/read-back check at scratch offset 0x7F0 */
        if (write_eeprom(sp, 0x7F0, 0x012345, 3))
                fail = 1;
        if (read_eeprom(sp, 0x7F0, &ret_data))
                fail = 1;

        if (ret_data != 0x012345) {
                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
                          "Data written %llx Data read %llx\n",
                          dev->name, (unsigned long long)0x12345,
                          (unsigned long long)ret_data);
                fail = 1;
        }

        /* Reset the EEPROM data go FFFF */
        write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

        if (sp->device_type == XFRAME_I_DEVICE) {
                /* Remaining negative tests: all of these offsets are
                 * expected to be write-protected on Xframe I. */
                /* Test Write Error at offset 0x80 */
                if (!write_eeprom(sp, 0x080, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 0xfc */
                if (!write_eeprom(sp, 0x0FC, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 0x100 */
                if (!write_eeprom(sp, 0x100, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 4ec */
                if (!write_eeprom(sp, 0x4EC, 0, 3))
                        fail = 1;
        }

        /* Restore values at offsets 0x4F0 and 0x7F0 */
        if (saved_4F0)
                write_eeprom(sp, 0x4F0, org_4F0, 3);
        if (saved_7F0)
                write_eeprom(sp, 0x7F0, org_7F0, 3);

        /* 0 == pass; any failure above fails the whole test */
        *data = fail;
        return fail;
}
6117
6118 /**
6119  * s2io_bist_test - invokes the MemBist test of the card .
6120  * @sp : private member of the device structure, which is a pointer to the
6121  * s2io_nic structure.
6122  * @data:variable that returns the result of each of the test conducted by
6123  * the driver.
6124  * Description:
6125  * This invokes the MemBist test of the card. We give around
6126  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6128  * Return value:
6129  * 0 on success and -1 on failure.
6130  */
6131
6132 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6133 {
6134         u8 bist = 0;
6135         int cnt = 0, ret = -1;
6136
6137         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6138         bist |= PCI_BIST_START;
6139         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6140
6141         while (cnt < 20) {
6142                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6143                 if (!(bist & PCI_BIST_START)) {
6144                         *data = (bist & PCI_BIST_CODE_MASK);
6145                         ret = 0;
6146                         break;
6147                 }
6148                 msleep(100);
6149                 cnt++;
6150         }
6151
6152         return ret;
6153 }
6154
6155 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6158  * s2io_nic structure.
6159  * @data: variable that returns the result of each of the test conducted by
6160  * the driver.
6161  * Description:
6162  * The function verifies the link state of the NIC and updates the input
6163  * argument 'data' appropriately.
6164  * Return value:
6165  * 0 on success.
6166  */
6167
6168 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6169 {
6170         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6171         u64 val64;
6172
6173         val64 = readq(&bar0->adapter_status);
6174         if (!(LINK_IS_UP(val64)))
6175                 *data = 1;
6176         else
6177                 *data = 0;
6178
6179         return *data;
6180 }
6181
6182 /**
6183  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
6187  * conducted by the driver.
6188  * Description:
6189  *  This is one of the offline test that tests the read and write
6190  *  access to the RldRam chip on the NIC.
6191  * Return value:
6192  *  0 on success.
6193  */
6194
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_fail = 0;

        /* Disable ECC reporting while the test pattern is in flight */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the RLDRAM controller into test mode */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two passes: iteration 1 repeats with the upper 48 bits of each
         * pattern inverted, so every data bit is exercised both ways. */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1)
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1)
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1)
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                writeq(val64, &bar0->mc_rldram_test_d2);

                /* Address/range for the test transfer */
                val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);

                /* Start the write phase and poll up to ~1s for DONE */
                val64 = MC_RLDRAM_TEST_MODE |
                        MC_RLDRAM_TEST_WRITE |
                        MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out waiting for the write phase to finish */
                if (cnt == 5)
                        break;

                /* Start the read-back phase and poll up to ~2.5s */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                /* Timed out waiting for the read phase to finish */
                if (cnt == 5)
                        break;

                /* Hardware compares read-back data against the patterns */
                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (!(val64 & MC_RLDRAM_TEST_PASS))
                        test_fail = 1;

                iteration++;
        }

        /* NOTE(review): a poll timeout breaks out with test_fail still 0,
         * so a hung controller is reported as a pass — verify intent. */
        *data = test_fail;

        /* Bring the adapter out of test mode */
        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

        return test_fail;
}
6277
6278 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6280  *  @sp : private member of the device structure, which is a pointer to the
6281  *  s2io_nic structure.
6282  *  @ethtest : pointer to a ethtool command specific structure that will be
6283  *  returned to the user.
6284  *  @data : variable that returns the result of each of the test
6285  * conducted by the driver.
6286  * Description:
6287  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6288  *  the health of the card.
6289  * Return value:
6290  *  void
6291  */
6292
6293 static void s2io_ethtool_test(struct net_device *dev,
6294                               struct ethtool_test *ethtest,
6295                               uint64_t *data)
6296 {
6297         struct s2io_nic *sp = netdev_priv(dev);
6298         int orig_state = netif_running(sp->dev);
6299
6300         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6301                 /* Offline Tests. */
6302                 if (orig_state)
6303                         s2io_close(sp->dev);
6304
6305                 if (s2io_register_test(sp, &data[0]))
6306                         ethtest->flags |= ETH_TEST_FL_FAILED;
6307
6308                 s2io_reset(sp);
6309
6310                 if (s2io_rldram_test(sp, &data[3]))
6311                         ethtest->flags |= ETH_TEST_FL_FAILED;
6312
6313                 s2io_reset(sp);
6314
6315                 if (s2io_eeprom_test(sp, &data[1]))
6316                         ethtest->flags |= ETH_TEST_FL_FAILED;
6317
6318                 if (s2io_bist_test(sp, &data[4]))
6319                         ethtest->flags |= ETH_TEST_FL_FAILED;
6320
6321                 if (orig_state)
6322                         s2io_open(sp->dev);
6323
6324                 data[2] = 0;
6325         } else {
6326                 /* Online Tests. */
6327                 if (!orig_state) {
6328                         DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6329                                   dev->name);
6330                         data[0] = -1;
6331                         data[1] = -1;
6332                         data[2] = -1;
6333                         data[3] = -1;
6334                         data[4] = -1;
6335                 }
6336
6337                 if (s2io_link_test(sp, &data[2]))
6338                         ethtest->flags |= ETH_TEST_FL_FAILED;
6339
6340                 data[0] = 0;
6341                 data[1] = 0;
6342                 data[3] = 0;
6343                 data[4] = 0;
6344         }
6345 }
6346
6347 static void s2io_get_ethtool_stats(struct net_device *dev,
6348                                    struct ethtool_stats *estats,
6349                                    u64 *tmp_stats)
6350 {
6351         int i = 0, k;
6352         struct s2io_nic *sp = netdev_priv(dev);
6353         struct stat_block *stat_info = sp->mac_control.stats_info;
6354
6355         s2io_updt_stats(sp);
6356         tmp_stats[i++] =
6357                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
6358                 le32_to_cpu(stat_info->tmac_frms);
6359         tmp_stats[i++] =
6360                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6361                 le32_to_cpu(stat_info->tmac_data_octets);
6362         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6363         tmp_stats[i++] =
6364                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6365                 le32_to_cpu(stat_info->tmac_mcst_frms);
6366         tmp_stats[i++] =
6367                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6368                 le32_to_cpu(stat_info->tmac_bcst_frms);
6369         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6370         tmp_stats[i++] =
6371                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6372                 le32_to_cpu(stat_info->tmac_ttl_octets);
6373         tmp_stats[i++] =
6374                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6375                 le32_to_cpu(stat_info->tmac_ucst_frms);
6376         tmp_stats[i++] =
6377                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6378                 le32_to_cpu(stat_info->tmac_nucst_frms);
6379         tmp_stats[i++] =
6380                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6381                 le32_to_cpu(stat_info->tmac_any_err_frms);
6382         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6383         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6384         tmp_stats[i++] =
6385                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6386                 le32_to_cpu(stat_info->tmac_vld_ip);
6387         tmp_stats[i++] =
6388                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6389                 le32_to_cpu(stat_info->tmac_drop_ip);
6390         tmp_stats[i++] =
6391                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6392                 le32_to_cpu(stat_info->tmac_icmp);
6393         tmp_stats[i++] =
6394                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6395                 le32_to_cpu(stat_info->tmac_rst_tcp);
6396         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6397         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6398                 le32_to_cpu(stat_info->tmac_udp);
6399         tmp_stats[i++] =
6400                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6401                 le32_to_cpu(stat_info->rmac_vld_frms);
6402         tmp_stats[i++] =
6403                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6404                 le32_to_cpu(stat_info->rmac_data_octets);
6405         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6406         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6407         tmp_stats[i++] =
6408                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6409                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6410         tmp_stats[i++] =
6411                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6412                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6413         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6414         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6415         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6416         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6417         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6418         tmp_stats[i++] =
6419                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6420                 le32_to_cpu(stat_info->rmac_ttl_octets);
6421         tmp_stats[i++] =
6422                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow) << 32
6423                 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6424         tmp_stats[i++] =
6425                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6426                 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6427         tmp_stats[i++] =
6428                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6429                 le32_to_cpu(stat_info->rmac_discarded_frms);
6430         tmp_stats[i++] =
6431                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6432                 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6433         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6434         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6435         tmp_stats[i++] =
6436                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6437                 le32_to_cpu(stat_info->rmac_usized_frms);
6438         tmp_stats[i++] =
6439                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6440                 le32_to_cpu(stat_info->rmac_osized_frms);
6441         tmp_stats[i++] =
6442                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6443                 le32_to_cpu(stat_info->rmac_frag_frms);
6444         tmp_stats[i++] =
6445                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6446                 le32_to_cpu(stat_info->rmac_jabber_frms);
6447         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6448         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6449         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6450         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6451         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6452         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6453         tmp_stats[i++] =
6454                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6455                 le32_to_cpu(stat_info->rmac_ip);
6456         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6457         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6458         tmp_stats[i++] =
6459                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6460                 le32_to_cpu(stat_info->rmac_drop_ip);
6461         tmp_stats[i++] =
6462                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6463                 le32_to_cpu(stat_info->rmac_icmp);
6464         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6465         tmp_stats[i++] =
6466                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6467                 le32_to_cpu(stat_info->rmac_udp);
6468         tmp_stats[i++] =
6469                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6470                 le32_to_cpu(stat_info->rmac_err_drp_udp);
6471         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6472         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6473         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6474         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6475         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6476         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6477         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6478         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6479         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6480         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6481         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6482         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6483         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6484         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6485         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6486         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6487         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6488         tmp_stats[i++] =
6489                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6490                 le32_to_cpu(stat_info->rmac_pause_cnt);
6491         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6492         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6493         tmp_stats[i++] =
6494                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6495                 le32_to_cpu(stat_info->rmac_accepted_ip);
6496         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6497         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6498         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6499         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6500         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6501         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6502         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6503         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6504         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6505         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6506         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6507         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6508         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6509         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6510         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6511         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6512         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6513         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6514         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6515
6516         /* Enhanced statistics exist only for Hercules */
6517         if (sp->device_type == XFRAME_II_DEVICE) {
6518                 tmp_stats[i++] =
6519                         le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6520                 tmp_stats[i++] =
6521                         le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6522                 tmp_stats[i++] =
6523                         le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6524                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6525                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6526                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6527                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6528                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6529                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6530                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6531                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6532                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6533                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6534                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6535                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6536                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6537         }
6538
6539         tmp_stats[i++] = 0;
6540         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6541         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6542         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6543         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6544         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6545         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6546         for (k = 0; k < MAX_RX_RINGS; k++)
6547                 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6548         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6549         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6550         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6551         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6552         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6553         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6554         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6555         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6556         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6557         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6558         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6559         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6560         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6561         tmp_stats[i++] = stat_info->sw_stat.sending_both;
6562         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6563         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6564         if (stat_info->sw_stat.num_aggregations) {
6565                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6566                 int count = 0;
6567                 /*
6568                  * Since 64-bit divide does not work on all platforms,
6569                  * do repeated subtraction.
6570                  */
6571                 while (tmp >= stat_info->sw_stat.num_aggregations) {
6572                         tmp -= stat_info->sw_stat.num_aggregations;
6573                         count++;
6574                 }
6575                 tmp_stats[i++] = count;
6576         } else
6577                 tmp_stats[i++] = 0;
6578         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6579         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6580         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6581         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6582         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6583         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6584         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6585         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6586         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6587
6588         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6589         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6590         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6591         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6592         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6593
6594         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6595         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6596         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6597         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6598         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6599         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6600         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6601         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6602         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6603         tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6604         tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6605         tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6606         tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6607         tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6608         tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6609         tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6610         tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6611         tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6612         tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6613         tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6614         tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6615         tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6616         tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6617         tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6618         tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6619         tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6620 }
6621
6622 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6623 {
6624         return XENA_REG_SPACE;
6625 }
6626
6627
6628 static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
6629 {
6630         struct s2io_nic *sp = netdev_priv(dev);
6631
6632         return sp->rx_csum;
6633 }
6634
6635 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6636 {
6637         struct s2io_nic *sp = netdev_priv(dev);
6638
6639         if (data)
6640                 sp->rx_csum = 1;
6641         else
6642                 sp->rx_csum = 0;
6643
6644         return 0;
6645 }
6646
6647 static int s2io_get_eeprom_len(struct net_device *dev)
6648 {
6649         return XENA_EEPROM_SPACE;
6650 }
6651
6652 static int s2io_get_sset_count(struct net_device *dev, int sset)
6653 {
6654         struct s2io_nic *sp = netdev_priv(dev);
6655
6656         switch (sset) {
6657         case ETH_SS_TEST:
6658                 return S2IO_TEST_LEN;
6659         case ETH_SS_STATS:
6660                 switch (sp->device_type) {
6661                 case XFRAME_I_DEVICE:
6662                         return XFRAME_I_STAT_LEN;
6663                 case XFRAME_II_DEVICE:
6664                         return XFRAME_II_STAT_LEN;
6665                 default:
6666                         return 0;
6667                 }
6668         default:
6669                 return -EOPNOTSUPP;
6670         }
6671 }
6672
6673 static void s2io_ethtool_get_strings(struct net_device *dev,
6674                                      u32 stringset, u8 *data)
6675 {
6676         int stat_size = 0;
6677         struct s2io_nic *sp = netdev_priv(dev);
6678
6679         switch (stringset) {
6680         case ETH_SS_TEST:
6681                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6682                 break;
6683         case ETH_SS_STATS:
6684                 stat_size = sizeof(ethtool_xena_stats_keys);
6685                 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6686                 if (sp->device_type == XFRAME_II_DEVICE) {
6687                         memcpy(data + stat_size,
6688                                &ethtool_enhanced_stats_keys,
6689                                sizeof(ethtool_enhanced_stats_keys));
6690                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6691                 }
6692
6693                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6694                        sizeof(ethtool_driver_stats_keys));
6695         }
6696 }
6697
6698 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6699 {
6700         if (data)
6701                 dev->features |= NETIF_F_IP_CSUM;
6702         else
6703                 dev->features &= ~NETIF_F_IP_CSUM;
6704
6705         return 0;
6706 }
6707
6708 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6709 {
6710         return (dev->features & NETIF_F_TSO) != 0;
6711 }
6712 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6713 {
6714         if (data)
6715                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6716         else
6717                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6718
6719         return 0;
6720 }
6721
/* ethtool entry points supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_tso = s2io_ethtool_op_get_tso,
        .set_tso = s2io_ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
};
6748
6749 /**
6750  *  s2io_ioctl - Entry point for the Ioctl
6751  *  @dev :  Device pointer.
6752  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6753  *  a proprietary structure used to pass information to the driver.
6754  *  @cmd :  This is used to distinguish between the different commands that
6755  *  can be passed to the IOCTL functions.
6756  *  Description:
6757  *  Currently there are no special functionality supported in IOCTL, hence
6758  *  function always return EOPNOTSUPPORTED
6759  */
6760
6761 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6762 {
6763         return -EOPNOTSUPP;
6764 }
6765
6766 /**
6767  *  s2io_change_mtu - entry point to change MTU size for the device.
6768  *   @dev : device pointer.
6769  *   @new_mtu : the new MTU size for the device.
6770  *   Description: A driver entry point to change MTU size for the device.
6771  *   Before changing the MTU the device must be stopped.
6772  *  Return value:
6773  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6774  *   file on failure.
6775  */
6776
6777 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6778 {
6779         struct s2io_nic *sp = netdev_priv(dev);
6780         int ret = 0;
6781
6782         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6783                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6784                 return -EPERM;
6785         }
6786
6787         dev->mtu = new_mtu;
6788         if (netif_running(dev)) {
6789                 s2io_stop_all_tx_queue(sp);
6790                 s2io_card_down(sp);
6791                 ret = s2io_card_up(sp);
6792                 if (ret) {
6793                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6794                                   __func__);
6795                         return ret;
6796                 }
6797                 s2io_wake_all_tx_queue(sp);
6798         } else { /* Device is down */
6799                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6800                 u64 val64 = new_mtu;
6801
6802                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6803         }
6804
6805         return ret;
6806 }
6807
6808 /**
6809  * s2io_set_link - Set the LInk status
6810  * @data: long pointer to device private structue
6811  * Description: Sets the link status for the adapter
6812  */
6813
6814 static void s2io_set_link(struct work_struct *work)
6815 {
6816         struct s2io_nic *nic = container_of(work, struct s2io_nic,
6817                                             set_link_task);
6818         struct net_device *dev = nic->dev;
6819         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6820         register u64 val64;
6821         u16 subid;
6822
6823         rtnl_lock();
6824
6825         if (!netif_running(dev))
6826                 goto out_unlock;
6827
6828         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6829                 /* The card is being reset, no point doing anything */
6830                 goto out_unlock;
6831         }
6832
6833         subid = nic->pdev->subsystem_device;
6834         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6835                 /*
6836                  * Allow a small delay for the NICs self initiated
6837                  * cleanup to complete.
6838                  */
6839                 msleep(100);
6840         }
6841
6842         val64 = readq(&bar0->adapter_status);
6843         if (LINK_IS_UP(val64)) {
6844                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6845                         if (verify_xena_quiescence(nic)) {
6846                                 val64 = readq(&bar0->adapter_control);
6847                                 val64 |= ADAPTER_CNTL_EN;
6848                                 writeq(val64, &bar0->adapter_control);
6849                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6850                                             nic->device_type, subid)) {
6851                                         val64 = readq(&bar0->gpio_control);
6852                                         val64 |= GPIO_CTRL_GPIO_0;
6853                                         writeq(val64, &bar0->gpio_control);
6854                                         val64 = readq(&bar0->gpio_control);
6855                                 } else {
6856                                         val64 |= ADAPTER_LED_ON;
6857                                         writeq(val64, &bar0->adapter_control);
6858                                 }
6859                                 nic->device_enabled_once = true;
6860                         } else {
6861                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6862                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6863                                 s2io_stop_all_tx_queue(nic);
6864                         }
6865                 }
6866                 val64 = readq(&bar0->adapter_control);
6867                 val64 |= ADAPTER_LED_ON;
6868                 writeq(val64, &bar0->adapter_control);
6869                 s2io_link(nic, LINK_UP);
6870         } else {
6871                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6872                                                       subid)) {
6873                         val64 = readq(&bar0->gpio_control);
6874                         val64 &= ~GPIO_CTRL_GPIO_0;
6875                         writeq(val64, &bar0->gpio_control);
6876                         val64 = readq(&bar0->gpio_control);
6877                 }
6878                 /* turn off LED */
6879                 val64 = readq(&bar0->adapter_control);
6880                 val64 = val64 & (~ADAPTER_LED_ON);
6881                 writeq(val64, &bar0->adapter_control);
6882                 s2io_link(nic, LINK_DOWN);
6883         }
6884         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6885
6886 out_unlock:
6887         rtnl_unlock();
6888 }
6889
6890 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6891                                   struct buffAdd *ba,
6892                                   struct sk_buff **skb, u64 *temp0, u64 *temp1,
6893                                   u64 *temp2, int size)
6894 {
6895         struct net_device *dev = sp->dev;
6896         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6897
6898         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6899                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6900                 /* allocate skb */
6901                 if (*skb) {
6902                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6903                         /*
6904                          * As Rx frame are not going to be processed,
6905                          * using same mapped address for the Rxd
6906                          * buffer pointer
6907                          */
6908                         rxdp1->Buffer0_ptr = *temp0;
6909                 } else {
6910                         *skb = dev_alloc_skb(size);
6911                         if (!(*skb)) {
6912                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6913                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6914                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6915                                 sp->mac_control.stats_info->sw_stat.
6916                                         mem_alloc_fail_cnt++;
6917                                 return -ENOMEM ;
6918                         }
6919                         sp->mac_control.stats_info->sw_stat.mem_allocated
6920                                 += (*skb)->truesize;
6921                         /* storing the mapped addr in a temp variable
6922                          * such it will be used for next rxd whose
6923                          * Host Control is NULL
6924                          */
6925                         rxdp1->Buffer0_ptr = *temp0 =
6926                                 pci_map_single(sp->pdev, (*skb)->data,
6927                                                size - NET_IP_ALIGN,
6928                                                PCI_DMA_FROMDEVICE);
6929                         if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6930                                 goto memalloc_failed;
6931                         rxdp->Host_Control = (unsigned long) (*skb);
6932                 }
6933         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6934                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6935                 /* Two buffer Mode */
6936                 if (*skb) {
6937                         rxdp3->Buffer2_ptr = *temp2;
6938                         rxdp3->Buffer0_ptr = *temp0;
6939                         rxdp3->Buffer1_ptr = *temp1;
6940                 } else {
6941                         *skb = dev_alloc_skb(size);
6942                         if (!(*skb)) {
6943                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6944                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6945                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6946                                 sp->mac_control.stats_info->sw_stat.
6947                                         mem_alloc_fail_cnt++;
6948                                 return -ENOMEM;
6949                         }
6950                         sp->mac_control.stats_info->sw_stat.mem_allocated
6951                                 += (*skb)->truesize;
6952                         rxdp3->Buffer2_ptr = *temp2 =
6953                                 pci_map_single(sp->pdev, (*skb)->data,
6954                                                dev->mtu + 4,
6955                                                PCI_DMA_FROMDEVICE);
6956                         if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6957                                 goto memalloc_failed;
6958                         rxdp3->Buffer0_ptr = *temp0 =
6959                                 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6960                                                PCI_DMA_FROMDEVICE);
6961                         if (pci_dma_mapping_error(sp->pdev,
6962                                                   rxdp3->Buffer0_ptr)) {
6963                                 pci_unmap_single(sp->pdev,
6964                                                  (dma_addr_t)rxdp3->Buffer2_ptr,
6965                                                  dev->mtu + 4,
6966                                                  PCI_DMA_FROMDEVICE);
6967                                 goto memalloc_failed;
6968                         }
6969                         rxdp->Host_Control = (unsigned long) (*skb);
6970
6971                         /* Buffer-1 will be dummy buffer not used */
6972                         rxdp3->Buffer1_ptr = *temp1 =
6973                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6974                                                PCI_DMA_FROMDEVICE);
6975                         if (pci_dma_mapping_error(sp->pdev,
6976                                                   rxdp3->Buffer1_ptr)) {
6977                                 pci_unmap_single(sp->pdev,
6978                                                  (dma_addr_t)rxdp3->Buffer0_ptr,
6979                                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
6980                                 pci_unmap_single(sp->pdev,
6981                                                  (dma_addr_t)rxdp3->Buffer2_ptr,
6982                                                  dev->mtu + 4,
6983                                                  PCI_DMA_FROMDEVICE);
6984                                 goto memalloc_failed;
6985                         }
6986                 }
6987         }
6988         return 0;
6989
6990 memalloc_failed:
6991         stats->pci_map_fail_cnt++;
6992         stats->mem_freed += (*skb)->truesize;
6993         dev_kfree_skb(*skb);
6994         return -ENOMEM;
6995 }
6996
6997 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6998                                 int size)
6999 {
7000         struct net_device *dev = sp->dev;
7001         if (sp->rxd_mode == RXD_MODE_1) {
7002                 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
7003         } else if (sp->rxd_mode == RXD_MODE_3B) {
7004                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7005                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
7006                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
7007         }
7008 }
7009
7010 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
7011 {
7012         int i, j, k, blk_cnt = 0, size;
7013         struct mac_info *mac_control = &sp->mac_control;
7014         struct config_param *config = &sp->config;
7015         struct net_device *dev = sp->dev;
7016         struct RxD_t *rxdp = NULL;
7017         struct sk_buff *skb = NULL;
7018         struct buffAdd *ba = NULL;
7019         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7020
7021         /* Calculate the size based on ring mode */
7022         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7023                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7024         if (sp->rxd_mode == RXD_MODE_1)
7025                 size += NET_IP_ALIGN;
7026         else if (sp->rxd_mode == RXD_MODE_3B)
7027                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
7028
7029         for (i = 0; i < config->rx_ring_num; i++) {
7030                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7031                 struct ring_info *ring = &mac_control->rings[i];
7032
7033                 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
7034
7035                 for (j = 0; j < blk_cnt; j++) {
7036                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7037                                 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
7038                                 if (sp->rxd_mode == RXD_MODE_3B)
7039                                         ba = &ring->ba[j][k];
7040                                 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
7041                                                            (u64 *)&temp0_64,
7042                                                            (u64 *)&temp1_64,
7043                                                            (u64 *)&temp2_64,
7044                                                            size) == -ENOMEM) {
7045                                         return 0;
7046                                 }
7047
7048                                 set_rxd_buffer_size(sp, rxdp, size);
7049                                 wmb();
7050                                 /* flip the Ownership bit to Hardware */
7051                                 rxdp->Control_1 |= RXD_OWN_XENA;
7052                         }
7053                 }
7054         }
7055         return 0;
7056
7057 }
7058
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt
 * handlers.  Falls back to shared INTA when MSI-X enabling or any vector
 * registration fails.  Returns 0 on success, -1 if even INTA registration
 * fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
        int ret = 0;
        struct net_device *dev = sp->dev;
        int err = 0;

        if (sp->config.intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->config.intr_type = INTA;
        }

        /*
         * Store the values of the MSIX table in
         * the struct s2io_nic structure
         */
        store_xmsi_data(sp);

        /* After proper initialization of H/W, register ISR */
        if (sp->config.intr_type == MSI_X) {
                int i, msix_rx_cnt = 0;

                for (i = 0; i < sp->num_entries; i++) {
                        if (sp->s2io_entries[i].in_use == MSIX_FLG) {
                                if (sp->s2io_entries[i].type ==
                                    MSIX_RING_TYPE) {
                                        sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
                                                dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                          s2io_msix_ring_handle,
                                                          0,
                                                          sp->desc[i],
                                                          sp->s2io_entries[i].arg);
                                } else if (sp->s2io_entries[i].type ==
                                           MSIX_ALARM_TYPE) {
                                        sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
                                                dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                          s2io_msix_fifo_handle,
                                                          0,
                                                          sp->desc[i],
                                                          sp->s2io_entries[i].arg);

                                }
                                /* if either data or addr is zero print it. */
                                if (!(sp->msix_info[i].addr &&
                                      sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG,
                                                  "%s @Addr:0x%llx Data:0x%llx\n",
                                                  sp->desc[i],
                                                  (unsigned long long)
                                                  sp->msix_info[i].addr,
                                                  (unsigned long long)
                                                  ntohl(sp->msix_info[i].data));
                                } else
                                        msix_rx_cnt++;
                                if (err) {
                                        /* undo every vector registered so far
                                         * before falling back to INTA */
                                        remove_msix_isr(sp);

                                        DBG_PRINT(ERR_DBG,
                                                  "%s:MSI-X-%d registration "
                                                  "failed\n", dev->name, i);

                                        DBG_PRINT(ERR_DBG,
                                                  "%s: Defaulting to INTA\n",
                                                  dev->name);
                                        sp->config.intr_type = INTA;
                                        break;
                                }
                                sp->s2io_entries[i].in_use =
                                        MSIX_REGISTERED_SUCCESS;
                        }
                }
                if (!err) {
                        /* NOTE(review): pre-decrement presumably excludes the
                         * alarm vector from the RX count — confirm */
                        pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
                        DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
                                  " through alarm vector\n");
                }
        }
        if (sp->config.intr_type == INTA) {
                err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                  sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                                  dev->name);
                        return -1;
                }
        }
        return 0;
}
7150
7151 static void s2io_rem_isr(struct s2io_nic *sp)
7152 {
7153         if (sp->config.intr_type == MSI_X)
7154                 remove_msix_isr(sp);
7155         else
7156                 remove_inta_isr(sp);
7157 }
7158
7159 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7160 {
7161         int cnt = 0;
7162         struct XENA_dev_config __iomem *bar0 = sp->bar0;
7163         register u64 val64 = 0;
7164         struct config_param *config;
7165         config = &sp->config;
7166
7167         if (!is_s2io_card_up(sp))
7168                 return;
7169
7170         del_timer_sync(&sp->alarm_timer);
7171         /* If s2io_set_link task is executing, wait till it completes. */
7172         while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7173                 msleep(50);
7174         clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7175
7176         /* Disable napi */
7177         if (sp->config.napi) {
7178                 int off = 0;
7179                 if (config->intr_type ==  MSI_X) {
7180                         for (; off < sp->config.rx_ring_num; off++)
7181                                 napi_disable(&sp->mac_control.rings[off].napi);
7182                 }
7183                 else
7184                         napi_disable(&sp->napi);
7185         }
7186
7187         /* disable Tx and Rx traffic on the NIC */
7188         if (do_io)
7189                 stop_nic(sp);
7190
7191         s2io_rem_isr(sp);
7192
7193         /* stop the tx queue, indicate link down */
7194         s2io_link(sp, LINK_DOWN);
7195
7196         /* Check if the device is Quiescent and then Reset the NIC */
7197         while (do_io) {
7198                 /* As per the HW requirement we need to replenish the
7199                  * receive buffer to avoid the ring bump. Since there is
7200                  * no intention of processing the Rx frame at this pointwe are
7201                  * just settting the ownership bit of rxd in Each Rx
7202                  * ring to HW and set the appropriate buffer size
7203                  * based on the ring mode
7204                  */
7205                 rxd_owner_bit_reset(sp);
7206
7207                 val64 = readq(&bar0->adapter_status);
7208                 if (verify_xena_quiescence(sp)) {
7209                         if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7210                                 break;
7211                 }
7212
7213                 msleep(50);
7214                 cnt++;
7215                 if (cnt == 10) {
7216                         DBG_PRINT(ERR_DBG, "s2io_close:Device not Quiescent ");
7217                         DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7218                                   (unsigned long long)val64);
7219                         break;
7220                 }
7221         }
7222         if (do_io)
7223                 s2io_reset(sp);
7224
7225         /* Free all Tx buffers */
7226         free_tx_buffers(sp);
7227
7228         /* Free all Rx buffers */
7229         free_rx_buffers(sp);
7230
7231         clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7232 }
7233
/*
 * Bring the adapter down with full register I/O: stop traffic, wait for
 * quiescence and reset the NIC (thin wrapper around do_s2io_card_down).
 */
static void s2io_card_down(struct s2io_nic *sp)
{
        do_s2io_card_down(sp, 1);
}
7238
7239 static int s2io_card_up(struct s2io_nic *sp)
7240 {
7241         int i, ret = 0;
7242         struct mac_info *mac_control;
7243         struct config_param *config;
7244         struct net_device *dev = (struct net_device *)sp->dev;
7245         u16 interruptible;
7246
7247         /* Initialize the H/W I/O registers */
7248         ret = init_nic(sp);
7249         if (ret != 0) {
7250                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7251                           dev->name);
7252                 if (ret != -EIO)
7253                         s2io_reset(sp);
7254                 return ret;
7255         }
7256
7257         /*
7258          * Initializing the Rx buffers. For now we are considering only 1
7259          * Rx ring and initializing buffers into 30 Rx blocks
7260          */
7261         mac_control = &sp->mac_control;
7262         config = &sp->config;
7263
7264         for (i = 0; i < config->rx_ring_num; i++) {
7265                 struct ring_info *ring = &mac_control->rings[i];
7266
7267                 ring->mtu = dev->mtu;
7268                 ret = fill_rx_buffers(sp, ring, 1);
7269                 if (ret) {
7270                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7271                                   dev->name);
7272                         s2io_reset(sp);
7273                         free_rx_buffers(sp);
7274                         return -ENOMEM;
7275                 }
7276                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7277                           ring->rx_bufs_left);
7278         }
7279
7280         /* Initialise napi */
7281         if (config->napi) {
7282                 if (config->intr_type ==  MSI_X) {
7283                         for (i = 0; i < sp->config.rx_ring_num; i++)
7284                                 napi_enable(&sp->mac_control.rings[i].napi);
7285                 } else {
7286                         napi_enable(&sp->napi);
7287                 }
7288         }
7289
7290         /* Maintain the state prior to the open */
7291         if (sp->promisc_flg)
7292                 sp->promisc_flg = 0;
7293         if (sp->m_cast_flg) {
7294                 sp->m_cast_flg = 0;
7295                 sp->all_multi_pos = 0;
7296         }
7297
7298         /* Setting its receive mode */
7299         s2io_set_multicast(dev);
7300
7301         if (sp->lro) {
7302                 /* Initialize max aggregatable pkts per session based on MTU */
7303                 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7304                 /* Check if we can use (if specified) user provided value */
7305                 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7306                         sp->lro_max_aggr_per_sess = lro_max_pkts;
7307         }
7308
7309         /* Enable Rx Traffic and interrupts on the NIC */
7310         if (start_nic(sp)) {
7311                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7312                 s2io_reset(sp);
7313                 free_rx_buffers(sp);
7314                 return -ENODEV;
7315         }
7316
7317         /* Add interrupt service routine */
7318         if (s2io_add_isr(sp) != 0) {
7319                 if (sp->config.intr_type == MSI_X)
7320                         s2io_rem_isr(sp);
7321                 s2io_reset(sp);
7322                 free_rx_buffers(sp);
7323                 return -ENODEV;
7324         }
7325
7326         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7327
7328         set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7329
7330         /*  Enable select interrupts */
7331         en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7332         if (sp->config.intr_type != INTA) {
7333                 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7334                 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7335         } else {
7336                 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7337                 interruptible |= TX_PIC_INTR;
7338                 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7339         }
7340
7341         return 0;
7342 }
7343
7344 /**
7345  * s2io_restart_nic - Resets the NIC.
7346  * @data : long pointer to the device private structure
7347  * Description:
7348  * This function is scheduled to be run by the s2io_tx_watchdog
7349  * function after 0.5 secs to reset the NIC. The idea is to reduce
7350  * the run time of the watch dog routine which is run holding a
7351  * spin lock.
7352  */
7353
7354 static void s2io_restart_nic(struct work_struct *work)
7355 {
7356         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7357         struct net_device *dev = sp->dev;
7358
7359         rtnl_lock();
7360
7361         if (!netif_running(dev))
7362                 goto out_unlock;
7363
7364         s2io_card_down(sp);
7365         if (s2io_card_up(sp)) {
7366                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7367         }
7368         s2io_wake_all_tx_queue(sp);
7369         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7370 out_unlock:
7371         rtnl_unlock();
7372 }
7373
7374 /**
7375  *  s2io_tx_watchdog - Watchdog for transmit side.
7376  *  @dev : Pointer to net device structure
7377  *  Description:
7378  *  This function is triggered if the Tx Queue is stopped
7379  *  for a pre-defined amount of time when the Interface is still up.
7380  *  If the Interface is jammed in such a situation, the hardware is
7381  *  reset (by s2io_close) and restarted again (by s2io_open) to
7382  *  overcome any problem that might have been caused in the hardware.
7383  *  Return value:
7384  *  void
7385  */
7386
7387 static void s2io_tx_watchdog(struct net_device *dev)
7388 {
7389         struct s2io_nic *sp = netdev_priv(dev);
7390
7391         if (netif_carrier_ok(dev)) {
7392                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7393                 schedule_work(&sp->rst_timer_task);
7394                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7395         }
7396 }
7397
7398 /**
7399  *   rx_osm_handler - To perform some OS related operations on SKB.
7400  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7401  *   @skb : the socket buffer pointer.
7402  *   @len : length of the packet
7403  *   @cksum : FCS checksum of the frame.
7404  *   @ring_no : the ring from which this RxD was extracted.
7405  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7407  *   some OS related operations on the SKB before passing it to the upper
7408  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7409  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7410  *   to the upper layer. If the checksum is wrong, it increments the Rx
7411  *   packet error count, frees the SKB and returns error.
7412  *   Return value:
7413  *   SUCCESS on success and -1 on failure.
7414  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
        struct s2io_nic *sp = ring_data->nic;
        struct net_device *dev = (struct net_device *)ring_data->dev;
        /* The skb pointer was stashed in Host_Control when the RxD was filled */
        struct sk_buff *skb = (struct sk_buff *)
                ((unsigned long)rxdp->Host_Control);
        int ring_no = ring_data->ring_no;
        u16 l3_csum, l4_csum;
        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
        struct lro *uninitialized_var(lro);
        u8 err_mask;

        skb->dev = dev;

        if (err) {
                /* Check for parity error */
                if (err & 0x1)
                        sp->mac_control.stats_info->sw_stat.parity_err_cnt++;

                /* Transfer code sits in the upper bits (>= 48) of Control_1 */
                err_mask = err >> 48;
                /* Bump the per-transfer-code software error counter */
                switch (err_mask) {
                case 1:
                        sp->mac_control.stats_info->sw_stat.rx_parity_err_cnt++;
                        break;

                case 2:
                        sp->mac_control.stats_info->sw_stat.rx_abort_cnt++;
                        break;

                case 3:
                        sp->mac_control.stats_info->sw_stat.rx_parity_abort_cnt++;
                        break;

                case 4:
                        sp->mac_control.stats_info->sw_stat.rx_rda_fail_cnt++;
                        break;

                case 5:
                        sp->mac_control.stats_info->sw_stat.rx_unkn_prot_cnt++;
                        break;

                case 6:
                        sp->mac_control.stats_info->sw_stat.rx_fcs_err_cnt++;
                        break;

                case 7:
                        sp->mac_control.stats_info->sw_stat.rx_buf_size_err_cnt++;
                        break;

                case 8:
                        sp->mac_control.stats_info->sw_stat.rx_rxd_corrupt_cnt++;
                        break;

                case 15:
                        sp->mac_control.stats_info->sw_stat.rx_unkn_err_cnt++;
                        break;
                }
                /*
                 * Drop the packet if bad transfer code. Exception being
                 * 0x5, which could be due to unsupported IPv6 extension header.
                 * In this case, we let stack handle the packet.
                 * Note that in this case, since checksum will be incorrect,
                 * stack will validate the same.
                 */
                if (err_mask != 0x5) {
                        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                                  dev->name, err_mask);
                        dev->stats.rx_crc_errors++;
                        sp->mac_control.stats_info->sw_stat.mem_freed
                                += skb->truesize;
                        /* Free the skb and return the buffer slot to the ring */
                        dev_kfree_skb(skb);
                        ring_data->rx_bufs_left -= 1;
                        rxdp->Host_Control = 0;
                        return 0;
                }
        }

        /* Updating statistics */
        ring_data->rx_packets++;
        rxdp->Host_Control = 0;
        if (sp->rxd_mode == RXD_MODE_1) {
                /* 1-buffer mode: the whole frame is in buffer 0 of the RxD */
                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

                ring_data->rx_bytes += len;
                skb_put(skb, len);

        } else if (sp->rxd_mode == RXD_MODE_3B) {
                /* 3B mode: buffer 0 (copied into skb headroom via skb_push)
                 * plus buffer 2 (already in the skb) make up the frame */
                int get_block = ring_data->rx_curr_get_info.block_index;
                int get_off = ring_data->rx_curr_get_info.offset;
                int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
                int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
                unsigned char *buff = skb_push(skb, buf0_len);

                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
                ring_data->rx_bytes += buf0_len + buf2_len;
                memcpy(buff, ba->ba_0, buf0_len);
                skb_put(skb, buf2_len);
        }

        /* Use the HW checksum result only for unfragmented TCP/UDP frames
         * (or when LRO is off) and when Rx checksumming is enabled */
        if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
            ((!ring_data->lro) ||
             (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
            (sp->rx_csum)) {
                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
                if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
                        /*
                         * NIC verifies if the Checksum of the received
                         * frame is Ok or not and accordingly returns
                         * a flag in the RxD.
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (ring_data->lro) {
                                u32 tcp_len;
                                u8 *tcp;
                                int ret = 0;

                                /* Classify the frame against existing LRO
                                 * sessions; ret selects the action below */
                                ret = s2io_club_tcp_session(ring_data,
                                                            skb->data, &tcp,
                                                            &tcp_len, &lro,
                                                            rxdp, sp);
                                switch (ret) {
                                case 3: /* Begin anew */
                                        lro->parent = skb;
                                        goto aggregate;
                                case 1: /* Aggregate */
                                        lro_append_pkt(sp, lro, skb, tcp_len);
                                        goto aggregate;
                                case 4: /* Flush session */
                                        lro_append_pkt(sp, lro, skb, tcp_len);
                                        queue_rx_frame(lro->parent,
                                                       lro->vlan_tag);
                                        clear_lro_session(lro);
                                        sp->mac_control.stats_info->
                                                sw_stat.flush_max_pkts++;
                                        goto aggregate;
                                case 2: /* Flush both */
                                        lro->parent->data_len = lro->frags_len;
                                        sp->mac_control.stats_info->
                                                sw_stat.sending_both++;
                                        queue_rx_frame(lro->parent,
                                                       lro->vlan_tag);
                                        clear_lro_session(lro);
                                        goto send_up;
                                case 0: /* sessions exceeded */
                                case -1: /* non-TCP or not L2 aggregatable */
                                case 5: /*
                                         * First pkt in session not
                                         * L3/L4 aggregatable
                                         */
                                        break;
                                default:
                                        /* Unexpected return code from
                                         * s2io_club_tcp_session: fatal */
                                        DBG_PRINT(ERR_DBG,
                                                  "%s: Samadhana!!\n",
                                                  __func__);
                                        BUG();
                                }
                        }
                } else {
                        /*
                         * Packet with erroneous checksum, let the
                         * upper layers deal with it.
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                }
        } else
                skb->ip_summed = CHECKSUM_NONE;

        sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
        /* Hand the frame to the stack, tagged with its Rx queue and VLAN */
        skb_record_rx_queue(skb, ring_no);
        queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
        /* Common exit: one Rx buffer was consumed from this ring */
        sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
        return SUCCESS;
}
7591
7592 /**
7593  *  s2io_link - stops/starts the Tx queue.
7594  *  @sp : private member of the device structure, which is a pointer to the
7595  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7597  *  Description:
7598  *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7600  *  interrupt handler whenever a link change interrupt comes up.
7601  *  Return value:
7602  *  void.
7603  */
7604
7605 static void s2io_link(struct s2io_nic *sp, int link)
7606 {
7607         struct net_device *dev = (struct net_device *)sp->dev;
7608
7609         if (link != sp->last_link_state) {
7610                 init_tti(sp, link);
7611                 if (link == LINK_DOWN) {
7612                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7613                         s2io_stop_all_tx_queue(sp);
7614                         netif_carrier_off(dev);
7615                         if (sp->mac_control.stats_info->sw_stat.link_up_cnt)
7616                                 sp->mac_control.stats_info->sw_stat.
7617                                         link_up_time = jiffies - sp->start_time;
7618                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7619                 } else {
7620                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7621                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7622                                 sp->mac_control.stats_info->
7623                                         sw_stat.link_down_time =
7624                                         jiffies - sp->start_time;
7625                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7626                         netif_carrier_on(dev);
7627                         s2io_wake_all_tx_queue(sp);
7628                 }
7629         }
7630         sp->last_link_state = link;
7631         sp->start_time = jiffies;
7632 }
7633
7634 /**
7635  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7636  *  @sp : private member of the device structure, which is a pointer to the
7637  *  s2io_nic structure.
7638  *  Description:
7639  *  This function initializes a few of the PCI and PCI-X configuration registers
7640  *  with recommended values.
7641  *  Return value:
7642  *  void
7643  */
7644
7645 static void s2io_init_pci(struct s2io_nic *sp)
7646 {
7647         u16 pci_cmd = 0, pcix_cmd = 0;
7648
7649         /* Enable Data Parity Error Recovery in PCI-X command register. */
7650         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7651                              &(pcix_cmd));
7652         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7653                               (pcix_cmd | 1));
7654         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7655                              &(pcix_cmd));
7656
7657         /* Set the PErr Response bit in PCI command register. */
7658         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7659         pci_write_config_word(sp->pdev, PCI_COMMAND,
7660                               (pci_cmd | PCI_COMMAND_PARITY));
7661         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7662 }
7663
7664 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7665                             u8 *dev_multiq)
7666 {
7667         if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7668                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7669                           "(%d) not supported\n", tx_fifo_num);
7670
7671                 if (tx_fifo_num < 1)
7672                         tx_fifo_num = 1;
7673                 else
7674                         tx_fifo_num = MAX_TX_FIFOS;
7675
7676                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7677                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7678         }
7679
7680         if (multiq)
7681                 *dev_multiq = multiq;
7682
7683         if (tx_steering_type && (1 == tx_fifo_num)) {
7684                 if (tx_steering_type != TX_DEFAULT_STEERING)
7685                         DBG_PRINT(ERR_DBG,
7686                                   "s2io: Tx steering is not supported with "
7687                                   "one fifo. Disabling Tx steering.\n");
7688                 tx_steering_type = NO_STEERING;
7689         }
7690
7691         if ((tx_steering_type < NO_STEERING) ||
7692             (tx_steering_type > TX_DEFAULT_STEERING)) {
7693                 DBG_PRINT(ERR_DBG,
7694                           "s2io: Requested transmit steering not supported\n");
7695                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7696                 tx_steering_type = NO_STEERING;
7697         }
7698
7699         if (rx_ring_num > MAX_RX_RINGS) {
7700                 DBG_PRINT(ERR_DBG,
7701                           "s2io: Requested number of rx rings not supported\n");
7702                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7703                           MAX_RX_RINGS);
7704                 rx_ring_num = MAX_RX_RINGS;
7705         }
7706
7707         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7708                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7709                           "Defaulting to INTA\n");
7710                 *dev_intr_type = INTA;
7711         }
7712
7713         if ((*dev_intr_type == MSI_X) &&
7714             ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7715              (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7716                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7717                           "Defaulting to INTA\n");
7718                 *dev_intr_type = INTA;
7719         }
7720
7721         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7722                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7723                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7724                 rx_ring_mode = 1;
7725         }
7726         return SUCCESS;
7727 }
7728
7729 /**
7730  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7731  * or Traffic class respectively.
7732  * @nic: device private variable
7733  * Description: The function configures the receive steering to
7734  * desired receive ring.
7735  * Return Value:  SUCCESS on success and
7736  * '-1' on failure (endian settings incorrect).
7737  */
7738 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7739 {
7740         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7741         register u64 val64 = 0;
7742
7743         if (ds_codepoint > 63)
7744                 return FAILURE;
7745
7746         val64 = RTS_DS_MEM_DATA(ring);
7747         writeq(val64, &bar0->rts_ds_mem_data);
7748
7749         val64 = RTS_DS_MEM_CTRL_WE |
7750                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7751                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7752
7753         writeq(val64, &bar0->rts_ds_mem_ctrl);
7754
7755         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7756                                      RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7757                                      S2IO_BIT_RESET);
7758 }
7759
/* net_device callback table for the s2io driver. */
static const struct net_device_ops s2io_netdev_ops = {
        .ndo_open               = s2io_open,
        .ndo_stop               = s2io_close,
        .ndo_get_stats          = s2io_get_stats,
        .ndo_start_xmit         = s2io_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = s2io_set_multicast,
        .ndo_do_ioctl           = s2io_ioctl,
        .ndo_set_mac_address    = s2io_set_mac_addr,
        .ndo_change_mtu         = s2io_change_mtu,
        .ndo_vlan_rx_register   = s2io_vlan_rx_register,
        .ndo_vlan_rx_kill_vid   = s2io_vlan_rx_kill_vid,
        .ndo_tx_timeout         = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = s2io_netpoll,
#endif
};
7777
7778 /**
7779  *  s2io_init_nic - Initialization of the adapter .
7780  *  @pdev : structure containing the PCI related information of the device.
7781  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7782  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
7784  *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7786  *  control register is initialized to enable read and write into the I/O
7787  *  registers of the device.
7788  *  Return value:
7789  *  returns 0 on success and negative on failure.
7790  */
7791
7792 static int __devinit
7793 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7794 {
7795         struct s2io_nic *sp;
7796         struct net_device *dev;
7797         int i, j, ret;
7798         int dma_flag = false;
7799         u32 mac_up, mac_down;
7800         u64 val64 = 0, tmp64 = 0;
7801         struct XENA_dev_config __iomem *bar0 = NULL;
7802         u16 subid;
7803         struct mac_info *mac_control;
7804         struct config_param *config;
7805         int mode;
7806         u8 dev_intr_type = intr_type;
7807         u8 dev_multiq = 0;
7808
7809         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7810         if (ret)
7811                 return ret;
7812
7813         ret = pci_enable_device(pdev);
7814         if (ret) {
7815                 DBG_PRINT(ERR_DBG,
7816                           "s2io_init_nic: pci_enable_device failed\n");
7817                 return ret;
7818         }
7819
7820         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7821                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7822                 dma_flag = true;
7823                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7824                         DBG_PRINT(ERR_DBG,
7825                                   "Unable to obtain 64bit DMA "
7826                                   "for consistent allocations\n");
7827                         pci_disable_device(pdev);
7828                         return -ENOMEM;
7829                 }
7830         } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7831                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7832         } else {
7833                 pci_disable_device(pdev);
7834                 return -ENOMEM;
7835         }
7836         ret = pci_request_regions(pdev, s2io_driver_name);
7837         if (ret) {
7838                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n",
7839                           __func__, ret);
7840                 pci_disable_device(pdev);
7841                 return -ENODEV;
7842         }
7843         if (dev_multiq)
7844                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7845         else
7846                 dev = alloc_etherdev(sizeof(struct s2io_nic));
7847         if (dev == NULL) {
7848                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7849                 pci_disable_device(pdev);
7850                 pci_release_regions(pdev);
7851                 return -ENODEV;
7852         }
7853
7854         pci_set_master(pdev);
7855         pci_set_drvdata(pdev, dev);
7856         SET_NETDEV_DEV(dev, &pdev->dev);
7857
7858         /*  Private member variable initialized to s2io NIC structure */
7859         sp = netdev_priv(dev);
7860         memset(sp, 0, sizeof(struct s2io_nic));
7861         sp->dev = dev;
7862         sp->pdev = pdev;
7863         sp->high_dma_flag = dma_flag;
7864         sp->device_enabled_once = false;
7865         if (rx_ring_mode == 1)
7866                 sp->rxd_mode = RXD_MODE_1;
7867         if (rx_ring_mode == 2)
7868                 sp->rxd_mode = RXD_MODE_3B;
7869
7870         sp->config.intr_type = dev_intr_type;
7871
7872         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7873             (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7874                 sp->device_type = XFRAME_II_DEVICE;
7875         else
7876                 sp->device_type = XFRAME_I_DEVICE;
7877
7878         sp->lro = lro_enable;
7879
7880         /* Initialize some PCI/PCI-X fields of the NIC. */
7881         s2io_init_pci(sp);
7882
7883         /*
7884          * Setting the device configuration parameters.
7885          * Most of these parameters can be specified by the user during
7886          * module insertion as they are module loadable parameters. If
7887          * these parameters are not not specified during load time, they
7888          * are initialized with default values.
7889          */
7890         mac_control = &sp->mac_control;
7891         config = &sp->config;
7892
7893         config->napi = napi;
7894         config->tx_steering_type = tx_steering_type;
7895
7896         /* Tx side parameters. */
7897         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7898                 config->tx_fifo_num = MAX_TX_FIFOS;
7899         else
7900                 config->tx_fifo_num = tx_fifo_num;
7901
7902         /* Initialize the fifos used for tx steering */
7903         if (config->tx_fifo_num < 5) {
7904                 if (config->tx_fifo_num  == 1)
7905                         sp->total_tcp_fifos = 1;
7906                 else
7907                         sp->total_tcp_fifos = config->tx_fifo_num - 1;
7908                 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7909                 sp->total_udp_fifos = 1;
7910                 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7911         } else {
7912                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7913                                        FIFO_OTHER_MAX_NUM);
7914                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7915                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7916                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7917         }
7918
7919         config->multiq = dev_multiq;
7920         for (i = 0; i < config->tx_fifo_num; i++) {
7921                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7922
7923                 tx_cfg->fifo_len = tx_fifo_len[i];
7924                 tx_cfg->fifo_priority = i;
7925         }
7926
7927         /* mapping the QoS priority to the configured fifos */
7928         for (i = 0; i < MAX_TX_FIFOS; i++)
7929                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7930
7931         /* map the hashing selector table to the configured fifos */
7932         for (i = 0; i < config->tx_fifo_num; i++)
7933                 sp->fifo_selector[i] = fifo_selector[i];
7934
7935
7936         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7937         for (i = 0; i < config->tx_fifo_num; i++) {
7938                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7939
7940                 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7941                 if (tx_cfg->fifo_len < 65) {
7942                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7943                         break;
7944                 }
7945         }
7946         /* + 2 because one Txd for skb->data and one Txd for UFO */
7947         config->max_txds = MAX_SKB_FRAGS + 2;
7948
7949         /* Rx side parameters. */
7950         config->rx_ring_num = rx_ring_num;
7951         for (i = 0; i < config->rx_ring_num; i++) {
7952                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7953                 struct ring_info *ring = &mac_control->rings[i];
7954
7955                 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7956                 rx_cfg->ring_priority = i;
7957                 ring->rx_bufs_left = 0;
7958                 ring->rxd_mode = sp->rxd_mode;
7959                 ring->rxd_count = rxd_count[sp->rxd_mode];
7960                 ring->pdev = sp->pdev;
7961                 ring->dev = sp->dev;
7962         }
7963
7964         for (i = 0; i < rx_ring_num; i++) {
7965                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7966
7967                 rx_cfg->ring_org = RING_ORG_BUFF1;
7968                 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7969         }
7970
7971         /*  Setting Mac Control parameters */
7972         mac_control->rmac_pause_time = rmac_pause_time;
7973         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7974         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7975
7976
7977         /*  initialize the shared memory used by the NIC and the host */
7978         if (init_shared_mem(sp)) {
7979                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7980                 ret = -ENOMEM;
7981                 goto mem_alloc_failed;
7982         }
7983
7984         sp->bar0 = pci_ioremap_bar(pdev, 0);
7985         if (!sp->bar0) {
7986                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7987                           dev->name);
7988                 ret = -ENOMEM;
7989                 goto bar0_remap_failed;
7990         }
7991
7992         sp->bar1 = pci_ioremap_bar(pdev, 2);
7993         if (!sp->bar1) {
7994                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7995                           dev->name);
7996                 ret = -ENOMEM;
7997                 goto bar1_remap_failed;
7998         }
7999
8000         dev->irq = pdev->irq;
8001         dev->base_addr = (unsigned long)sp->bar0;
8002
8003         /* Initializing the BAR1 address as the start of the FIFO pointer. */
8004         for (j = 0; j < MAX_TX_FIFOS; j++) {
8005                 mac_control->tx_FIFO_start[j] =
8006                         (struct TxFIFO_element __iomem *)
8007                         (sp->bar1 + (j * 0x00020000));
8008         }
8009
8010         /*  Driver entry points */
8011         dev->netdev_ops = &s2io_netdev_ops;
8012         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8013         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8014
8015         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8016         if (sp->high_dma_flag == true)
8017                 dev->features |= NETIF_F_HIGHDMA;
8018         dev->features |= NETIF_F_TSO;
8019         dev->features |= NETIF_F_TSO6;
8020         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
8021                 dev->features |= NETIF_F_UFO;
8022                 dev->features |= NETIF_F_HW_CSUM;
8023         }
8024         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
8025         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8026         INIT_WORK(&sp->set_link_task, s2io_set_link);
8027
8028         pci_save_state(sp->pdev);
8029
8030         /* Setting swapper control on the NIC, for proper reset operation */
8031         if (s2io_set_swapper(sp)) {
8032                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
8033                           dev->name);
8034                 ret = -EAGAIN;
8035                 goto set_swap_failed;
8036         }
8037
8038         /* Verify if the Herc works on the slot its placed into */
8039         if (sp->device_type & XFRAME_II_DEVICE) {
8040                 mode = s2io_verify_pci_mode(sp);
8041                 if (mode < 0) {
8042                         DBG_PRINT(ERR_DBG, "%s: ", __func__);
8043                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8044                         ret = -EBADSLT;
8045                         goto set_swap_failed;
8046                 }
8047         }
8048
8049         if (sp->config.intr_type == MSI_X) {
8050                 sp->num_entries = config->rx_ring_num + 1;
8051                 ret = s2io_enable_msi_x(sp);
8052
8053                 if (!ret) {
8054                         ret = s2io_test_msi(sp);
8055                         /* rollback MSI-X, will re-enable during add_isr() */
8056                         remove_msix_isr(sp);
8057                 }
8058                 if (ret) {
8059
8060                         DBG_PRINT(ERR_DBG,
8061                                   "s2io: MSI-X requested but failed to enable\n");
8062                         sp->config.intr_type = INTA;
8063                 }
8064         }
8065
8066         if (config->intr_type ==  MSI_X) {
8067                 for (i = 0; i < config->rx_ring_num ; i++) {
8068                         struct ring_info *ring = &mac_control->rings[i];
8069
8070                         netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8071                 }
8072         } else {
8073                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8074         }
8075
8076         /* Not needed for Herc */
8077         if (sp->device_type & XFRAME_I_DEVICE) {
8078                 /*
8079                  * Fix for all "FFs" MAC address problems observed on
8080                  * Alpha platforms
8081                  */
8082                 fix_mac_address(sp);
8083                 s2io_reset(sp);
8084         }
8085
8086         /*
8087          * MAC address initialization.
8088          * For now only one mac address will be read and used.
8089          */
8090         bar0 = sp->bar0;
8091         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8092                 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8093         writeq(val64, &bar0->rmac_addr_cmd_mem);
8094         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8095                               RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8096                               S2IO_BIT_RESET);
8097         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8098         mac_down = (u32)tmp64;
8099         mac_up = (u32) (tmp64 >> 32);
8100
8101         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8102         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8103         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8104         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8105         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8106         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8107
8108         /*  Set the factory defined MAC address initially   */
8109         dev->addr_len = ETH_ALEN;
8110         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8111         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8112
8113         /* initialize number of multicast & unicast MAC entries variables */
8114         if (sp->device_type == XFRAME_I_DEVICE) {
8115                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8116                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8117                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8118         } else if (sp->device_type == XFRAME_II_DEVICE) {
8119                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8120                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8121                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8122         }
8123
8124         /* store mac addresses from CAM to s2io_nic structure */
8125         do_s2io_store_unicast_mc(sp);
8126
8127         /* Configure MSIX vector for number of rings configured plus one */
8128         if ((sp->device_type == XFRAME_II_DEVICE) &&
8129             (config->intr_type == MSI_X))
8130                 sp->num_entries = config->rx_ring_num + 1;
8131
8132         /* Store the values of the MSIX table in the s2io_nic structure */
8133         store_xmsi_data(sp);
8134         /* reset Nic and bring it to known state */
8135         s2io_reset(sp);
8136
8137         /*
8138          * Initialize link state flags
8139          * and the card state parameter
8140          */
8141         sp->state = 0;
8142
8143         /* Initialize spinlocks */
8144         for (i = 0; i < sp->config.tx_fifo_num; i++) {
8145                 struct fifo_info *fifo = &mac_control->fifos[i];
8146
8147                 spin_lock_init(&fifo->tx_lock);
8148         }
8149
8150         /*
8151          * SXE-002: Configure link and activity LED to init state
8152          * on driver load.
8153          */
8154         subid = sp->pdev->subsystem_device;
8155         if ((subid & 0xFF) >= 0x07) {
8156                 val64 = readq(&bar0->gpio_control);
8157                 val64 |= 0x0000800000000000ULL;
8158                 writeq(val64, &bar0->gpio_control);
8159                 val64 = 0x0411040400000000ULL;
8160                 writeq(val64, (void __iomem *)bar0 + 0x2700);
8161                 val64 = readq(&bar0->gpio_control);
8162         }
8163
8164         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8165
8166         if (register_netdev(dev)) {
8167                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8168                 ret = -ENODEV;
8169                 goto register_failed;
8170         }
8171         s2io_vpd_read(sp);
8172         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8173         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8174                   sp->product_name, pdev->revision);
8175         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8176                   s2io_driver_version);
8177         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
8178         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8179         if (sp->device_type & XFRAME_II_DEVICE) {
8180                 mode = s2io_print_pci_mode(sp);
8181                 if (mode < 0) {
8182                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8183                         ret = -EBADSLT;
8184                         unregister_netdev(dev);
8185                         goto set_swap_failed;
8186                 }
8187         }
8188         switch (sp->rxd_mode) {
8189         case RXD_MODE_1:
8190                 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8191                           dev->name);
8192                 break;
8193         case RXD_MODE_3B:
8194                 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8195                           dev->name);
8196                 break;
8197         }
8198
8199         switch (sp->config.napi) {
8200         case 0:
8201                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8202                 break;
8203         case 1:
8204                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8205                 break;
8206         }
8207
8208         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8209                   sp->config.tx_fifo_num);
8210
8211         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8212                   sp->config.rx_ring_num);
8213
8214         switch (sp->config.intr_type) {
8215         case INTA:
8216                 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8217                 break;
8218         case MSI_X:
8219                 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8220                 break;
8221         }
8222         if (sp->config.multiq) {
8223                 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8224                         struct fifo_info *fifo = &mac_control->fifos[i];
8225
8226                         fifo->multiq = config->multiq;
8227                 }
8228                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8229                           dev->name);
8230         } else
8231                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8232                           dev->name);
8233
8234         switch (sp->config.tx_steering_type) {
8235         case NO_STEERING:
8236                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8237                           dev->name);
8238                 break;
8239         case TX_PRIORITY_STEERING:
8240                 DBG_PRINT(ERR_DBG,
8241                           "%s: Priority steering enabled for transmit\n",
8242                           dev->name);
8243                 break;
8244         case TX_DEFAULT_STEERING:
8245                 DBG_PRINT(ERR_DBG,
8246                           "%s: Default steering enabled for transmit\n",
8247                           dev->name);
8248         }
8249
8250         if (sp->lro)
8251                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8252                           dev->name);
8253         if (ufo)
8254                 DBG_PRINT(ERR_DBG,
8255                           "%s: UDP Fragmentation Offload(UFO) enabled\n",
8256                           dev->name);
8257         /* Initialize device name */
8258         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8259
8260         if (vlan_tag_strip)
8261                 sp->vlan_strip_flag = 1;
8262         else
8263                 sp->vlan_strip_flag = 0;
8264
8265         /*
8266          * Make Link state as off at this point, when the Link change
8267          * interrupt comes the state will be automatically changed to
8268          * the right state.
8269          */
8270         netif_carrier_off(dev);
8271
8272         return 0;
8273
8274 register_failed:
8275 set_swap_failed:
8276         iounmap(sp->bar1);
8277 bar1_remap_failed:
8278         iounmap(sp->bar0);
8279 bar0_remap_failed:
8280 mem_alloc_failed:
8281         free_shared_mem(sp);
8282         pci_disable_device(pdev);
8283         pci_release_regions(pdev);
8284         pci_set_drvdata(pdev, NULL);
8285         free_netdev(dev);
8286
8287         return ret;
8288 }
8289
8290 /**
8291  * s2io_rem_nic - Free the PCI device
8292  * @pdev: structure containing the PCI related information of the device.
8293  * Description: This function is called by the Pci subsystem to release a
8294  * PCI device and free up all resource held up by the device. This could
8295  * be in response to a Hot plug event or when the driver is to be removed
8296  * from memory.
8297  */
8298
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
		(struct net_device *)pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Let any scheduled restart/link-change work finish before
	 * the device and its private data go away.
	 */
	flush_scheduled_work();

	sp = netdev_priv(dev);
	/* Detach from the network stack first; no new I/O after this */
	unregister_netdev(dev);

	/* Release resources roughly in reverse order of probe */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8323
8324 /**
8325  * s2io_starter - Entry point for the driver
8326  * Description: This function is the entry point for the driver. It verifies
8327  * the module loadable parameters and initializes PCI configuration space.
8328  */
8329
static int __init s2io_starter(void)
{
	/* All per-device initialization happens in the PCI probe callback */
	return pci_register_driver(&s2io_driver);
}
8334
8335 /**
8336  * s2io_closer - Cleanup routine for the driver
8337  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8338  */
8339
static __exit void s2io_closer(void)
{
	/* Per-device teardown is done by the PCI remove callback */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8345
/* Module entry and exit points */
module_init(s2io_starter);
module_exit(s2io_closer);
8348
/*
 * check_L2_lro_capable - verify L2 framing and locate the IP/TCP headers
 * @buffer: start of the received frame data
 * @ip: out - set to the start of the IP header on success
 * @tcp: out - set to the start of the TCP header on success
 * @rxdp: receive descriptor for this frame
 * @sp: device private structure
 *
 * Returns 0 with @ip and @tcp filled in when the frame is a TCP frame
 * in a DIX (Ethernet II) encapsulation, -1 when it is not eligible for
 * LRO (non-TCP protocol, or LLC/SNAP framing).
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp,
				struct s2io_nic *sp)
{
	int ip_off;
	/* L2 frame type reported by the NIC in bits 37-39 of Control_1 */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,
			  "%s: Non-TCP frames not supported for LRO\n",
			  __func__);
		return -1;
	}

	/* Checking for DIX type or DIX type with VLAN */
	if ((l2_type == 0) || (l2_type == 4)) {
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		/*
		 * If vlan stripping is disabled and the frame is VLAN tagged,
		 * shift the offset by the VLAN header size bytes.
		 */
		if ((!sp->vlan_strip_flag) &&
		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
			ip_off += HEADER_VLAN_SIZE;
	} else {
		/* LLC, SNAP etc are considered non-mergeable */
		return -1;
	}

	/* ihl is in 32-bit words; convert to bytes to find the TCP header */
	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
8385
8386 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8387                                   struct tcphdr *tcp)
8388 {
8389         DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8390         if ((lro->iph->saddr != ip->saddr) ||
8391             (lro->iph->daddr != ip->daddr) ||
8392             (lro->tcph->source != tcp->source) ||
8393             (lro->tcph->dest != tcp->dest))
8394                 return -1;
8395         return 0;
8396 }
8397
8398 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8399 {
8400         return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8401 }
8402
8403 static void initiate_new_session(struct lro *lro, u8 *l2h,
8404                                  struct iphdr *ip, struct tcphdr *tcp,
8405                                  u32 tcp_pyld_len, u16 vlan_tag)
8406 {
8407         DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8408         lro->l2h = l2h;
8409         lro->iph = ip;
8410         lro->tcph = tcp;
8411         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8412         lro->tcp_ack = tcp->ack_seq;
8413         lro->sg_num = 1;
8414         lro->total_len = ntohs(ip->tot_len);
8415         lro->frags_len = 0;
8416         lro->vlan_tag = vlan_tag;
8417         /*
8418          * Check if we saw TCP timestamp.
8419          * Other consistency checks have already been done.
8420          */
8421         if (tcp->doff == 8) {
8422                 __be32 *ptr;
8423                 ptr = (__be32 *)(tcp+1);
8424                 lro->saw_ts = 1;
8425                 lro->cur_tsval = ntohl(*(ptr+1));
8426                 lro->cur_tsecr = *(ptr+2);
8427         }
8428         lro->in_use = 1;
8429 }
8430
/*
 * update_L3L4_header - finalize headers before flushing an LRO session
 * @sp: device private structure (for aggregation statistics)
 * @lro: session whose first-segment headers are patched up
 *
 * Rewrites the IP total length/checksum and the TCP ack/window (and
 * tsecr when timestamps are in use) so the merged super-packet looks
 * like a single valid TCP segment to the stack.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	/* checksum field must be zero while it is being recomputed */
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
8461
8462 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8463                              struct tcphdr *tcp, u32 l4_pyld)
8464 {
8465         DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8466         lro->total_len += l4_pyld;
8467         lro->frags_len += l4_pyld;
8468         lro->tcp_next_seq += l4_pyld;
8469         lro->sg_num++;
8470
8471         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8472         lro->tcp_ack = tcp->ack_seq;
8473         lro->window = tcp->window;
8474
8475         if (lro->saw_ts) {
8476                 __be32 *ptr;
8477                 /* Update tsecr and tsval from this packet */
8478                 ptr = (__be32 *)(tcp+1);
8479                 lro->cur_tsval = ntohl(*(ptr+1));
8480                 lro->cur_tsecr = *(ptr + 2);
8481         }
8482 }
8483
/*
 * verify_l3_l4_lro_capable - decide if a TCP segment may be merged by LRO
 * @l_lro: existing session to compare timestamps against, or NULL when
 *         evaluating a prospective new session
 * @ip: IP header of the segment
 * @tcp: TCP header of the segment
 * @tcp_pyld_len: TCP payload length of the segment
 *
 * Returns 0 when the segment is mergeable, -1 when it must be sent up
 * unmerged (pure ACK, IP options, ECN CE, TCP control flags other than
 * ACK, or unexpected TCP options).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip NOP padding preceding the timestamp option.
		 * NOTE(review): this loop has no bound; an option area of
		 * all NOPs would walk past the header. Presumably doff == 8
		 * with TCP traffic implies a timestamp option is present -
		 * confirm against the RFC 1323 layout assumption.
		 */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8541
/*
 * s2io_club_tcp_session - LRO decision engine for a received TCP segment
 * @ring_data: ring the frame arrived on (holds the LRO session table)
 * @buffer: start of the received frame data
 * @tcp: out - set to the start of the TCP header
 * @tcp_len: out - set to the TCP payload length
 * @lro: out - set to the matching/new LRO session, or NULL
 * @rxdp: receive descriptor for this frame
 * @sp: device private structure
 *
 * Return codes consumed by the rx path:
 *   0 - all LRO sessions already in use; send packet up as-is
 *   1 - segment aggregated into an existing session
 *   2 - flush the matching session (out-of-order or not mergeable)
 *   3 - new session initiated for this segment
 *   4 - segment aggregated and session hit lro_max_aggr_per_sess; flush
 *   5 - segment not L3/L4 aggregatable; send it up without a session
 *   negative - L2 check failed (see check_L2_lro_capable())
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* Look for a session tracking the same 4-tuple */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
					sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s:All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Finalize headers so the flushed session is a valid packet */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s:Dont know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8638
8639 static void clear_lro_session(struct lro *lro)
8640 {
8641         static u16 lro_struct_size = sizeof(struct lro);
8642
8643         memset(lro, 0, lro_struct_size);
8644 }
8645
8646 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8647 {
8648         struct net_device *dev = skb->dev;
8649         struct s2io_nic *sp = netdev_priv(dev);
8650
8651         skb->protocol = eth_type_trans(skb, dev);
8652         if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
8653                 /* Queueing the vlan frame to the upper layer */
8654                 if (sp->config.napi)
8655                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8656                 else
8657                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8658         } else {
8659                 if (sp->config.napi)
8660                         netif_receive_skb(skb);
8661                 else
8662                         netif_rx(skb);
8663         }
8664 }
8665
8666 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8667                            struct sk_buff *skb, u32 tcp_len)
8668 {
8669         struct sk_buff *first = lro->parent;
8670
8671         first->len += tcp_len;
8672         first->data_len = lro->frags_len;
8673         skb_pull(skb, (skb->len - tcp_len));
8674         if (skb_shinfo(first)->frag_list)
8675                 lro->last_frag->next = skb;
8676         else
8677                 skb_shinfo(first)->frag_list = skb;
8678         first->truesize += skb->truesize;
8679         lro->last_frag = skb;
8680         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8681         return;
8682 }
8683
8684 /**
8685  * s2io_io_error_detected - called when PCI error is detected
8686  * @pdev: Pointer to PCI device
8687  * @state: The current pci connection state
8688  *
8689  * This function is called after a PCI bus error affecting
8690  * this device has been detected.
8691  */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	/* Stop further netdev activity while recovery is in progress */
	netif_device_detach(netdev);

	/* Link is gone for good; tell the core to give up on the device */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Request a slot reset; s2io_io_slot_reset() will be called next */
	return PCI_ERS_RESULT_NEED_RESET;
}
8711
8712 /**
8713  * s2io_io_slot_reset - called after the pci bus has been reset.
8714  * @pdev: Pointer to PCI device
8715  *
8716  * Restart the card from scratch, as if from a cold-boot.
8717  * At this point, the card has exprienced a hard reset,
8718  * followed by fixups by BIOS, and has its config space
8719  * set up identically to what it was at cold boot.
8720  */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and put the NIC back into a known state */
	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8736
8737 /**
8738  * s2io_io_resume - called when traffic can start flowing again.
8739  * @pdev: Pointer to PCI device
8740  *
8741  * This callback is called when the error recovery driver tells
8742  * us that its OK to resume normal operation.
8743  */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Re-initialize the hardware; bail out if it won't come up */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Reprogram the MAC address that was lost across the reset */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	/* Let the stack use the device again */
	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}