be2net: Fix be_tx_q_clean() being called on freed queues
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

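/*
 * Note: the helpers below treat each be_queue_info as a simple
 * producer/consumer ring over the DMA memory allocated above: 'head'
 * is where new entries are posted, 'tail' is where they are consumed,
 * and index_inc() wraps the index at q->len (e.g. with q->len == 4 the
 * head advances 0 -> 1 -> 2 -> 3 -> 0).
 */
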
static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}

static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
        u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        if (!enabled && enable) {
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        } else if (enabled && !enable) {
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        } else {
                printk(KERN_WARNING DRV_NAME
                        ": bad value in membar_int_ctrl reg=0x%x\n", reg);
                return;
        }
        iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
        iowrite32(val, ctrl->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
        iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
                bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}

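/*
 * The doorbell helpers above all follow the same pattern: the ring id
 * goes in the low bits of a 32-bit word, the posted/popped count and
 * any arm/clear flags are OR'ed in at the shifts defined in be.h, and
 * the word is written to the doorbell BAR with a single iowrite32().
 */
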
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (netif_running(netdev)) {
                status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
                                adapter->pmac_id);
                if (status)
                        return status;

                status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id);
        }

        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

static void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->stats.net_stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = port_stats->rx_total_frames;
        dev_stats->tx_packets = port_stats->tx_unicastframes +
                port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
        dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
                                (u64) port_stats->rx_bytes_lsd;
        dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
                                (u64) port_stats->tx_bytes_lsd;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->multicast = port_stats->tx_multicastframes;
        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

static void be_link_status_update(struct be_adapter *adapter)
{
        struct be_link_info *prev = &adapter->link;
        struct be_link_info now = { 0 };
        struct net_device *netdev = adapter->netdev;

        be_cmd_link_status_query(&adapter->ctrl, &now);

        /* If link came up or went down */
        if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
                        prev->speed == PHY_LINK_SPEED_ZERO)) {
                if (now.speed == PHY_LINK_SPEED_ZERO) {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                } else {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                }
        }
        *prev = now;
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_ctrl_info *ctrl = &adapter->ctrl;
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

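/*
 * A note on the adaptive coalescing above: the frags/sec rate is
 * sampled roughly once a second and mapped to an EQ delay, so a busy
 * link batches more events per interrupt while a near-idle one keeps
 * the delay at 0. Illustrative numbers: ~880,000 frags/sec yields
 * (880000 / 110000) << 3 == 64, which is then clamped to
 * [min_eqd, max_eqd].
 */
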
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        struct be_adapter *adapter = netdev_priv(dev);

        return &adapter->stats.net_stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

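/*
 * Example of the accounting above: an skb with linear data and two
 * page frags needs 1 + 2 fragment WRBs plus the header WRB, i.e. 4
 * (already even, so no dummy). With a single page frag the count
 * would be 3, so a dummy WRB is added to round it up to 4.
 */
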
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        u64 busaddr;
        u32 i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;

        atomic_add(wrb_cnt, &txq->used);
        hdr = queue_head_node(txq);
        queue_head_inc(txq);

        if (skb->len > skb->data_len) {
                int len = skb->len - skb->data_len;
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
}

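/*
 * Note the ordering in make_tx_wrbs(): the header WRB's slot is
 * reserved first at the ring head, the DMA-mapped fragment WRBs (and
 * an optional dummy to keep the count even) follow, and only then is
 * the header filled in with the final wrb_cnt and byte total.
 */
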
static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

        /* record the sent skb in the sent_skb table */
        BUG_ON(tx_obj->sent_skb_list[start]);
        tx_obj->sent_skb_list[start] = skb;

        /* Ensure that txq has space for the next skb; else stop the queue
         * *BEFORE* ringing the tx doorbell, so that we serialize the
         * tx compls of the current transmit which'll wake up the queue
         */
        if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
                netif_stop_queue(netdev);
                stopped = true;
        }

        be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

        be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * If BE_NUM_VLANS_SUPPORTED or fewer VLANs are configured, program
 * them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;

        if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
                        vtag, ntags, 1, 0);
        } else {
                be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
                        NULL, 0, 1, 1);
        }
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_ctrl_info *ctrl = &adapter->ctrl;

        be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
        be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
        be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans++;
        adapter->vlan_tag[vid] = 1;

        be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans--;
        adapter->vlan_tag[vid] = 0;

        vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        be_vid_config(netdev);
}

static void be_set_multicast_filter(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct dev_mc_list *mc_ptr;
        u8 mac_addr[32][ETH_ALEN];
        int i = 0;

        if (netdev->flags & IFF_ALLMULTI) {
                /* set BE in Multicast promiscuous */
                be_cmd_mcast_mac_set(&adapter->ctrl,
                                        adapter->if_handle, NULL, 0, true);
                return;
        }

        for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
                memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
                if (++i >= 32) {
                        be_cmd_mcast_mac_set(&adapter->ctrl,
                                adapter->if_handle, &mac_addr[0][0], i, false);
                        i = 0;
                }
        }

        if (i) {
                /* reset the promiscuous mode also. */
                be_cmd_mcast_mac_set(&adapter->ctrl,
                        adapter->if_handle, &mac_addr[0][0], i, false);
        }
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
        } else {
                be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
                be_set_multicast_filter(netdev);
        }
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user)
                pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;
        u32 pktsize, hdr_len, curr_frag_len;
        u8 *start;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        memset(page_info, 0, sizeof(*page_info));

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        pktsize -= curr_frag_len; /* account for above copied frag */
        for (i = 1; i < num_rcvd; i++) {
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(pktsize, rx_frag_size);

                skb_shinfo(skb)->frags[i].page = page_info->page;
                skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
                skb_shinfo(skb)->frags[i].size = curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb_shinfo(skb)->nr_frags++;
                pktsize -= curr_frag_len;

                memset(page_info, 0, sizeof(*page_info));
        }

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd);
        return;
}

/* Process the RX completion indicated by rxcp when LRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vtp, vid;

        vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

        skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
        if (!skb) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_reserve(skb, NET_IP_ALIGN);

        skb_fill_rx_data(adapter, skb, rxcp);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->dev = adapter->netdev;

        if (vtp) {
                if (!adapter->vlan_grp || adapter->num_vlans == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }

        adapter->netdev->last_rx = jiffies;

        return;
}

/* Process the RX completion indicated by rxcp when LRO is enabled */
static void be_rx_compl_process_lro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

        remaining = pkt_size;
        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                rx_frags[i].page = page_info->page;
                rx_frags[i].page_offset = page_info->page_offset;
                rx_frags[i].size = curr_frag_len;
                remaining -= curr_frag_len;

                index_inc(&rxq_idx, rxq->len);

                memset(page_info, 0, sizeof(*page_info));
        }

        if (likely(!vlanf)) {
                lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
                                pkt_size, NULL, 0);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);

                if (!adapter->vlan_grp || adapter->num_vlans == 0)
                        return;

                lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
                        rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
                        vid, NULL, 0);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd);
        return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                pci_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
                queue_head_inc(rxq);

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(&adapter->ctrl, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }

        return;
}

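/*
 * Note on the splitting scheme above: one big page is DMA-mapped once
 * and carved into rx_frag_size chunks, with get_page() taken for each
 * extra chunk so that put_page() works per fragment. Only the
 * fragment flagged last_page_user triggers the pci_unmap_page() in
 * get_rx_page_info(), which is why the flag is set as soon as no room
 * is left for another fragment.
 */
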
static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u64 busaddr;
        u16 cur_index, num_wrbs = 0;

        cur_index = txq->tail;
        sent_skb = sent_skbs[cur_index];
        BUG_ON(!sent_skb);
        sent_skbs[cur_index] = NULL;

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                be_dws_le_to_cpu(wrb, sizeof(*wrb));
                busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
                if (busaddr != 0) {
                        pci_unmap_single(adapter->pdev, busaddr,
                                wrb->frag_len, PCI_DMA_TODEVICE);
                }
                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First clean up pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_q_clean(struct be_adapter *adapter)
{
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        u16 last_index;
        bool dummy_wrb;

        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                last_index = txq->tail;
                index_adv(&last_index,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, last_index);
        }
}

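/*
 * be_tx_q_clean() walks sent_skb_list and the WRB ring directly, so it
 * must only run while the queue memory is still allocated. That is the
 * ordering this patch fixes: be_tx_queues_destroy() below calls it
 * after be_cmd_q_destroy() (so no further completions can arrive) but
 * before be_queue_free() releases the DMA memory.
 */
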
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created) {
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);

                /* No more tx completions can be rcvd now; clean up if there
                 * are any pending completions or pending tx requests */
                be_tx_q_clean(adapter);
        }
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(&adapter->ctrl, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->rx_obj.q;
        if (q->created) {
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
                be_rx_q_clean(adapter);
        }
        be_queue_free(adapter, q);

        q = &adapter->rx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        q = &adapter->rx_eq.q;
        if (q->created)
                be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        int rc;

        adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        adapter->rx_eq.max_eqd = BE_MAX_EQD;
        adapter->rx_eq.min_eqd = 0;
        adapter->rx_eq.cur_eqd = 0;
        adapter->rx_eq.enable_aic = true;

        /* Alloc Rx Event queue */
        eq = &adapter->rx_eq.q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
        if (rc)
                return rc;

        /* Ask BE to create Rx Event queue */
        rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
        if (rc)
                goto rx_eq_free;

        /* Alloc RX eth compl queue */
        cq = &adapter->rx_obj.cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                        sizeof(struct be_eth_rx_compl));
        if (rc)
                goto rx_eq_destroy;

        /* Ask BE to create Rx eth compl queue */
        rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
        if (rc)
                goto rx_cq_free;

        /* Alloc RX eth queue */
        q = &adapter->rx_obj.q;
        rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
        if (rc)
                goto rx_cq_destroy;

        /* Ask BE to create Rx eth queue */
        rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
                BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
        if (rc)
                goto rx_q_free;

        return 0;
rx_q_free:
        be_queue_free(adapter, q);
rx_cq_destroy:
        be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
        be_queue_free(adapter, cq);
rx_eq_destroy:
        be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
        be_queue_free(adapter, eq);
        return rc;
}

static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
        struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
        u32 evt = entry->evt;

        if (!evt)
                return false;

        evt = le32_to_cpu(evt);
        *rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
        entry->evt = 0;
        queue_tail_inc(&eq_obj->q);
        return true;
}

static int event_handle(struct be_ctrl_info *ctrl,
                        struct be_eq_obj *eq_obj)
{
        u16 rid = 0, num = 0;

        while (event_get(eq_obj, &rid))
                num++;

        /* We may see an interrupt with no events pending */
        be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_ctrl_info *ctrl = &adapter->ctrl;
        int rx, tx;

        tx = event_handle(ctrl, &adapter->tx_eq);
        rx = event_handle(ctrl, &adapter->rx_eq);

        if (rx || tx)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(&adapter->ctrl, &adapter->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(&adapter->ctrl, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_lro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                drvr_stats(adapter)->be_rxcp_err++;

        return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
                false : true;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(rx_eq, struct be_adapter, rx_eq);
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(adapter);
                if (!rxcp)
                        break;

                if (do_lro(adapter, rxcp))
                        be_rx_compl_process_lro(adapter, rxcp);
                else
                        be_rx_compl_process(adapter, rxcp);

                be_rx_compl_reset(rxcp);
        }

        lro_flush_all(&adapter->rx_obj.lro_mgr);

        /* Refill the queue */
        if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(adapter);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
        }
        return work_done;
}

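/*
 * be_poll_rx() above follows the usual NAPI contract: if fewer than
 * 'budget' completions were processed the poll is complete and the CQ
 * is re-armed; otherwise the CQ is notified without re-arming so that
 * polling continues with interrupts left off.
 */
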
1327 /* For TX we don't honour budget; consume everything */
1328 int be_poll_tx(struct napi_struct *napi, int budget)
1329 {
1330         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1331         struct be_adapter *adapter =
1332                 container_of(tx_eq, struct be_adapter, tx_eq);
1333         struct be_tx_obj *tx_obj = &adapter->tx_obj;
1334         struct be_queue_info *tx_cq = &tx_obj->cq;
1335         struct be_queue_info *txq = &tx_obj->q;
1336         struct be_eth_tx_compl *txcp;
1337         u32 num_cmpl = 0;
1338         u16 end_idx;
1339
1340         while ((txcp = be_tx_compl_get(adapter))) {
1341                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1342                                         wrb_index, txcp);
1343                 be_tx_compl_process(adapter, end_idx);
1344                 num_cmpl++;
1345         }
1346
1347         /* As Tx wrbs have been freed up, wake up netdev queue if
1348          * it was stopped due to lack of tx wrbs.
1349          */
1350         if (netif_queue_stopped(adapter->netdev) &&
1351                         atomic_read(&txq->used) < txq->len / 2) {
1352                 netif_wake_queue(adapter->netdev);
1353         }
1354
1355         napi_complete(napi);
1356
1357         be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
1358
1359         drvr_stats(adapter)->be_tx_events++;
1360         drvr_stats(adapter)->be_tx_compl += num_cmpl;
1361
1362         return 1;
1363 }
1364
1365 static void be_worker(struct work_struct *work)
1366 {
1367         struct be_adapter *adapter =
1368                 container_of(work, struct be_adapter, work.work);
1369         int status;
1370
1371         /* Check link */
1372         be_link_status_update(adapter);
1373
1374         /* Get Stats */
1375         status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
1376         if (!status)
1377                 netdev_stats_update(adapter);
1378
1379         /* Set EQ delay */
1380         be_rx_eqd_update(adapter);
1381
1382         be_tx_rate_update(adapter);
1383         be_rx_rate_update(adapter);
1384
1385         if (adapter->rx_post_starved) {
1386                 adapter->rx_post_starved = false;
1387                 be_post_rx_frags(adapter);
1388         }
1389
1390         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1391 }
1392
1393 static void be_msix_enable(struct be_adapter *adapter)
1394 {
1395         int i, status;
1396
1397         for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1398                 adapter->msix_entries[i].entry = i;
1399
1400         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1401                 BE_NUM_MSIX_VECTORS);
1402         if (status == 0)
1403                 adapter->msix_enabled = true;
1404         return;
1405 }
1406
1407 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1408 {
1409         return adapter->msix_entries[eq_id -
1410                         8 * adapter->ctrl.pci_func].vector;
1411 }
1412
1413 static int be_msix_register(struct be_adapter *adapter)
1414 {
1415         struct net_device *netdev = adapter->netdev;
1416         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1417         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1418         int status, vec;
1419
1420         sprintf(tx_eq->desc, "%s-tx", netdev->name);
1421         vec = be_msix_vec_get(adapter, tx_eq->q.id);
1422         status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
1423         if (status)
1424                 goto err;
1425
1426         sprintf(rx_eq->desc, "%s-rx", netdev->name);
1427         vec = be_msix_vec_get(adapter, rx_eq->q.id);
1428         status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
1429         if (status) { /* Free TX IRQ */
1430                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1431                 free_irq(vec, adapter);
1432                 goto err;
1433         }
1434         return 0;
1435 err:
1436         dev_warn(&adapter->pdev->dev,
1437                 "MSIX Request IRQ failed - err %d\n", status);
1438         pci_disable_msix(adapter->pdev);
1439         adapter->msix_enabled = false;
1440         return status;
1441 }
1442
1443 static int be_irq_register(struct be_adapter *adapter)
1444 {
1445         struct net_device *netdev = adapter->netdev;
1446         int status;
1447
1448         if (adapter->msix_enabled) {
1449                 status = be_msix_register(adapter);
1450                 if (status == 0)
1451                         goto done;
1452         }
1453
1454         /* INTx */
1455         netdev->irq = adapter->pdev->irq;
1456         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1457                         adapter);
1458         if (status) {
1459                 dev_err(&adapter->pdev->dev,
1460                         "INTx request IRQ failed - err %d\n", status);
1461                 return status;
1462         }
1463 done:
1464         adapter->isr_registered = true;
1465         return 0;
1466 }
1467
1468 static void be_irq_unregister(struct be_adapter *adapter)
1469 {
1470         struct net_device *netdev = adapter->netdev;
1471         int vec;
1472
1473         if (!adapter->isr_registered)
1474                 return;
1475
1476         /* INTx */
1477         if (!adapter->msix_enabled) {
1478                 free_irq(netdev->irq, adapter);
1479                 goto done;
1480         }
1481
1482         /* MSIx */
1483         vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
1484         free_irq(vec, adapter);
1485         vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
1486         free_irq(vec, adapter);
1487 done:
1488         adapter->isr_registered = false;
1489         return;
1490 }
1491
1492 static int be_open(struct net_device *netdev)
1493 {
1494         struct be_adapter *adapter = netdev_priv(netdev);
1495         struct be_ctrl_info *ctrl = &adapter->ctrl;
1496         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1497         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1498         u32 if_flags;
1499         int status;
1500
1501         if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
1502                 BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
1503                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1504         status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
1505                         false/* pmac_invalid */, &adapter->if_handle,
1506                         &adapter->pmac_id);
1507         if (status != 0)
1508                 goto do_none;
1509
1510         be_vid_config(netdev);
1511
1512         status = be_cmd_set_flow_control(ctrl, true, true);
1513         if (status != 0)
1514                 goto if_destroy;
1515
1516         status = be_tx_queues_create(adapter);
1517         if (status != 0)
1518                 goto if_destroy;
1519
1520         status = be_rx_queues_create(adapter);
1521         if (status != 0)
1522                 goto tx_qs_destroy;
1523
1524         /* First time posting */
1525         be_post_rx_frags(adapter);
1526
1527         napi_enable(&rx_eq->napi);
1528         napi_enable(&tx_eq->napi);
1529
1530         be_irq_register(adapter);
1531
1532         be_intr_set(ctrl, true);
1533
1534         /* The evt queues are created in the unarmed state; arm them */
1535         be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
1536         be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
1537
1538         /* The compl queues are created in the unarmed state; arm them */
1539         be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
1540         be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
1541
1542         be_link_status_update(adapter);
1543
1544         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1545         return 0;
1546
1547 tx_qs_destroy:
1548         be_tx_queues_destroy(adapter);
1549 if_destroy:
1550         be_cmd_if_destroy(ctrl, adapter->if_handle);
1551 do_none:
1552         return status;
1553 }
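/*
 * Editor's note: the bring-up order above is deliberate: create the
 * interface, then the TX/RX queues, post receive buffers, enable NAPI,
 * register the IRQ, and only then unmask the host interrupt and arm
 * the event/completion queues, so no event can arrive before handlers
 * and buffers exist.  be_close() below tears the same steps down in
 * essentially reverse order.
 */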
1554
1555 static int be_close(struct net_device *netdev)
1556 {
1557         struct be_adapter *adapter = netdev_priv(netdev);
1558         struct be_ctrl_info *ctrl = &adapter->ctrl;
1559         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1560         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1561         int vec;
1562
1563         cancel_delayed_work_sync(&adapter->work);
1564
1565         netif_stop_queue(netdev);
1566         netif_carrier_off(netdev);
1567         adapter->link.speed = PHY_LINK_SPEED_ZERO;
1568
1569         be_intr_set(ctrl, false);
1570
1571         if (adapter->msix_enabled) {
1572                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1573                 synchronize_irq(vec);
1574                 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1575                 synchronize_irq(vec);
1576         } else {
1577                 synchronize_irq(netdev->irq);
1578         }
1579         be_irq_unregister(adapter);
1580
1581         napi_disable(&rx_eq->napi);
1582         napi_disable(&tx_eq->napi);
1583
1584         be_rx_queues_destroy(adapter);
1585         be_tx_queues_destroy(adapter);
1586
1587         be_cmd_if_destroy(ctrl, adapter->if_handle);
1588         return 0;
1589 }
1590
1591 static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
1592                                 void **ip_hdr, void **tcpudp_hdr,
1593                                 u64 *hdr_flags, void *priv)
1594 {
1595         struct ethhdr *eh;
1596         struct vlan_ethhdr *veh;
1597         struct iphdr *iph;
1598         u8 *va = page_address(frag->page) + frag->page_offset;
1599         unsigned long ll_hlen;
1600
1601         prefetch(va);
1602         eh = (struct ethhdr *)va;
1603         *mac_hdr = eh;
1604         ll_hlen = ETH_HLEN;
1605         if (eh->h_proto != htons(ETH_P_IP)) {
1606                 if (eh->h_proto == htons(ETH_P_8021Q)) {
1607                         veh = (struct vlan_ethhdr *)va;
1608                         if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
1609                                 return -1;
1610
1611                         ll_hlen += VLAN_HLEN;
1612                 } else {
1613                         return -1;
1614                 }
1615         }
1616         *hdr_flags = LRO_IPV4;
1617         iph = (struct iphdr *)(va + ll_hlen);
1618         *ip_hdr = iph;
1619         if (iph->protocol != IPPROTO_TCP)
1620                 return -1;
1621         *hdr_flags |= LRO_TCP;
1622         *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
1623
1624         return 0;
1625 }
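/*
 * Editor's note: the parser above admits exactly two frame layouts;
 * anything else returns -1 and the frame bypasses LRO aggregation:
 *
 *   [ethhdr, 14B          ][iphdr][tcphdr]   h_proto == ETH_P_IP
 *   [vlan_ethhdr, 14B + 4B][iphdr][tcphdr]   h_proto == ETH_P_8021Q
 *
 * iph->ihl counts 32-bit words, hence the "ihl << 2" byte offset used
 * to locate the TCP header.
 */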
1626
1627 static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
1628 {
1629         struct net_lro_mgr *lro_mgr;
1630
1631         lro_mgr = &adapter->rx_obj.lro_mgr;
1632         lro_mgr->dev = netdev;
1633         lro_mgr->features = LRO_F_NAPI;
1634         lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
1635         lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
1636         lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
1637         lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
1638         lro_mgr->get_frag_header = be_get_frag_header;
1639         lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
1640 }
1641
1642 static struct net_device_ops be_netdev_ops = {
1643         .ndo_open               = be_open,
1644         .ndo_stop               = be_close,
1645         .ndo_start_xmit         = be_xmit,
1646         .ndo_get_stats          = be_get_stats,
1647         .ndo_set_rx_mode        = be_set_multicast_list,
1648         .ndo_set_mac_address    = be_mac_addr_set,
1649         .ndo_change_mtu         = be_change_mtu,
1650         .ndo_validate_addr      = eth_validate_addr,
1651         .ndo_vlan_rx_register   = be_vlan_register,
1652         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
1653         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
1654 };
1655
1656 static void be_netdev_init(struct net_device *netdev)
1657 {
1658         struct be_adapter *adapter = netdev_priv(netdev);
1659
1660         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
1661                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
1662                 NETIF_F_IPV6_CSUM;
1663
1664         netdev->flags |= IFF_MULTICAST;
1665
1666         adapter->rx_csum = true;
1667
1668         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
1669
1670         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
1671
1672         be_lro_init(adapter, netdev);
1673
1674         netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
1675                 BE_NAPI_WEIGHT);
1676         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
1677                 BE_NAPI_WEIGHT);
1678
1679         netif_carrier_off(netdev);
1680         netif_stop_queue(netdev);
1681 }
1682
1683 static void be_unmap_pci_bars(struct be_adapter *adapter)
1684 {
1685         struct be_ctrl_info *ctrl = &adapter->ctrl;
1686         if (ctrl->csr)
1687                 iounmap(ctrl->csr);
1688         if (ctrl->db)
1689                 iounmap(ctrl->db);
1690         if (ctrl->pcicfg)
1691                 iounmap(ctrl->pcicfg);
1692 }
1693
1694 static int be_map_pci_bars(struct be_adapter *adapter)
1695 {
1696         u8 __iomem *addr;
1697
1698         addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
1699                         pci_resource_len(adapter->pdev, 2));
1700         if (addr == NULL)
1701                 return -ENOMEM;
1702         adapter->ctrl.csr = addr;
1703
1704         addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
1705                         128 * 1024);
1706         if (addr == NULL)
1707                 goto pci_map_err;
1708         adapter->ctrl.db = addr;
1709
1710         addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
1711                         pci_resource_len(adapter->pdev, 1));
1712         if (addr == NULL)
1713                 goto pci_map_err;
1714         adapter->ctrl.pcicfg = addr;
1715
1716         return 0;
1717 pci_map_err:
1718         be_unmap_pci_bars(adapter);
1719         return -ENOMEM;
1720 }
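/*
 * Editor's note: per the code above, BAR 2 holds the CSR block and
 * BAR 1 the PCI-config shadow, both mapped in full, while only the
 * first 128KB of BAR 4 (the doorbell area) is mapped.  Why 128KB
 * suffices is not stated here; it is presumably a hardware property.
 */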
1721
1722
1723 static void be_ctrl_cleanup(struct be_adapter *adapter)
1724 {
1725         struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced;
1726
1727         be_unmap_pci_bars(adapter);
1728
1729         if (mem->va)
1730                 pci_free_consistent(adapter->pdev, mem->size,
1731                         mem->va, mem->dma);
1732 }
1733
1734 /* Initialize the mbox required to send cmds to BE */
1735 static int be_ctrl_init(struct be_adapter *adapter)
1736 {
1737         struct be_ctrl_info *ctrl = &adapter->ctrl;
1738         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
1739         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
1740         int status;
1741         u32 val;
1742
1743         status = be_map_pci_bars(adapter);
1744         if (status)
1745                 return status;
1746
1747         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
1748         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
1749                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
1750         if (!mbox_mem_alloc->va) {
1751                 be_unmap_pci_bars(adapter);
1752                 return -1;
1753         }
1754         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
1755         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
1756         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
1757         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
1758         spin_lock_init(&ctrl->cmd_lock);
1759
1760         val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
1761         ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
1762                                         MEMBAR_CTRL_INT_CTRL_PFUNC_MASK;
1763         return 0;
1764 }
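/*
 * Editor's note: a minimal sketch of the over-allocate-and-align idiom
 * used for the mailbox above.  The hardware evidently wants the mailbox
 * on a 16-byte boundary; allocating sizeof(mailbox) + 16 guarantees an
 * aligned window inside the buffer, and PTR_ALIGN() rounds both the CPU
 * and bus addresses up to it.  The demo_* names are hypothetical and
 * the fragment is not compiled:
 */
#if 0
	demo_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	demo_alloc->va = pci_alloc_consistent(pdev, demo_alloc->size,
					      &demo_alloc->dma);
	demo_align->va = PTR_ALIGN(demo_alloc->va, 16);
	demo_align->dma = PTR_ALIGN(demo_alloc->dma, 16);
	/* pci_free_consistent() must later be passed demo_alloc's
	 * va/dma, never the aligned copies. */
#endif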
1765
1766 static void be_stats_cleanup(struct be_adapter *adapter)
1767 {
1768         struct be_stats_obj *stats = &adapter->stats;
1769         struct be_dma_mem *cmd = &stats->cmd;
1770
1771         if (cmd->va)
1772                 pci_free_consistent(adapter->pdev, cmd->size,
1773                         cmd->va, cmd->dma);
1774 }
1775
1776 static int be_stats_init(struct be_adapter *adapter)
1777 {
1778         struct be_stats_obj *stats = &adapter->stats;
1779         struct be_dma_mem *cmd = &stats->cmd;
1780
1781         cmd->size = sizeof(struct be_cmd_req_get_stats);
1782         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
1783         if (cmd->va == NULL)
1784                 return -1;
1785         return 0;
1786 }
1787
1788 static void __devexit be_remove(struct pci_dev *pdev)
1789 {
1790         struct be_adapter *adapter = pci_get_drvdata(pdev);
1791         if (!adapter)
1792                 return;
1793
1794         unregister_netdev(adapter->netdev);
1795
1796         be_stats_cleanup(adapter);
1797
1798         be_ctrl_cleanup(adapter);
1799
1800         if (adapter->msix_enabled) {
1801                 pci_disable_msix(adapter->pdev);
1802                 adapter->msix_enabled = false;
1803         }
1804
1805         pci_set_drvdata(pdev, NULL);
1806         pci_release_regions(pdev);
1807         pci_disable_device(pdev);
1808
1809         free_netdev(adapter->netdev);
1810 }
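/*
 * Editor's note: unregister_netdev() is called first on purpose: for a
 * running interface it invokes be_close(), so all queues and IRQs are
 * quiesced before the stats, control structures and PCI resources they
 * depend on are released.
 */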
1811
1812 static int be_hw_up(struct be_adapter *adapter)
1813 {
1814         struct be_ctrl_info *ctrl = &adapter->ctrl;
1815         int status;
1816
1817         status = be_cmd_POST(ctrl);
1818         if (status)
1819                 return status;
1820
1821         status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver);
1822         if (status)
1823                 return status;
1824
1825         status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num);
1826         return status;
1827 }
1828
1829 static int __devinit be_probe(struct pci_dev *pdev,
1830                         const struct pci_device_id *pdev_id)
1831 {
1832         int status = 0;
1833         struct be_adapter *adapter;
1834         struct net_device *netdev;
1835         struct be_ctrl_info *ctrl;
1836         u8 mac[ETH_ALEN];
1837
1838         status = pci_enable_device(pdev);
1839         if (status)
1840                 goto do_none;
1841
1842         status = pci_request_regions(pdev, DRV_NAME);
1843         if (status)
1844                 goto disable_dev;
1845         pci_set_master(pdev);
1846
1847         netdev = alloc_etherdev(sizeof(struct be_adapter));
1848         if (netdev == NULL) {
1849                 status = -ENOMEM;
1850                 goto rel_reg;
1851         }
1852         adapter = netdev_priv(netdev);
1853         adapter->pdev = pdev;
1854         pci_set_drvdata(pdev, adapter);
1855         adapter->netdev = netdev;
1856
1857         be_msix_enable(adapter);
1858
1859         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1860         if (!status) {
1861                 netdev->features |= NETIF_F_HIGHDMA;
1862         } else {
1863                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1864                 if (status) {
1865                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
1866                         goto free_netdev;
1867                 }
1868         }
1869
1870         ctrl = &adapter->ctrl;
1871         status = be_ctrl_init(adapter);
1872         if (status)
1873                 goto free_netdev;
1874
1875         status = be_stats_init(adapter);
1876         if (status)
1877                 goto ctrl_clean;
1878
1879         status = be_hw_up(adapter);
1880         if (status)
1881                 goto stats_clean;
1882
1883         status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK,
1884                         true /* permanent */, 0);
1885         if (status)
1886                 goto stats_clean;
1887         memcpy(netdev->dev_addr, mac, ETH_ALEN);
1888
1889         INIT_DELAYED_WORK(&adapter->work, be_worker);
1890         be_netdev_init(netdev);
1891         SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
1892
1893         status = register_netdev(netdev);
1894         if (status != 0)
1895                 goto stats_clean;
1896
1897         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
1898         return 0;
1899
1900 stats_clean:
1901         be_stats_cleanup(adapter);
1902 ctrl_clean:
1903         be_ctrl_cleanup(adapter);
1904 free_netdev:
1905         free_netdev(adapter->netdev);
1906 rel_reg:
1907         pci_release_regions(pdev);
1908 disable_dev:
1909         pci_disable_device(pdev);
1910 do_none:
1911         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
1912         return status;
1913 }
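/*
 * Editor's note: be_probe() above uses the kernel's standard goto
 * unwind ladder: each acquired resource gets a label, and a failure at
 * step N jumps to the label releasing steps N-1..1 in reverse order.
 * Skeleton of the idiom (generic names, not compiled):
 */
#if 0
	status = step_a();
	if (status)
		goto out;
	status = step_b();
	if (status)
		goto undo_a;
	status = step_c();
	if (status)
		goto undo_b;
	return 0;
undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
out:
	return status;
#endif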
1914
1915 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
1916 {
1917         struct be_adapter *adapter = pci_get_drvdata(pdev);
1918         struct net_device *netdev = adapter->netdev;
1919
1920         netif_device_detach(netdev);
1921         if (netif_running(netdev)) {
1922                 rtnl_lock();
1923                 be_close(netdev);
1924                 rtnl_unlock();
1925         }
1926
1927         pci_save_state(pdev);
1928         pci_disable_device(pdev);
1929         pci_set_power_state(pdev, pci_choose_state(pdev, state));
1930         return 0;
1931 }
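/*
 * Editor's note: be_close() and be_open() are the driver's
 * ndo_stop/ndo_open handlers, which the networking core normally calls
 * with the RTNL lock held.  Since suspend/resume invoke them directly,
 * they wrap the calls in rtnl_lock()/rtnl_unlock(), as above and in
 * be_resume() below.
 */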
1932
1933 static int be_resume(struct pci_dev *pdev)
1934 {
1935         int status = 0;
1936         struct be_adapter *adapter = pci_get_drvdata(pdev);
1937         struct net_device *netdev = adapter->netdev;
1938
1939         netif_device_detach(netdev);
1940
1941         status = pci_enable_device(pdev);
1942         if (status)
1943                 return status;
1944
1945         pci_set_power_state(pdev, PCI_D0);
1946         pci_restore_state(pdev);
1947
1948         if (netif_running(netdev)) {
1949                 rtnl_lock();
1950                 be_open(netdev);
1951                 rtnl_unlock();
1952         }
1953         netif_device_attach(netdev);
1954         return 0;
1955 }
1956
1957 static struct pci_driver be_driver = {
1958         .name = DRV_NAME,
1959         .id_table = be_dev_ids,
1960         .probe = be_probe,
1961         .remove = be_remove,
1962         .suspend = be_suspend,
1963         .resume = be_resume
1964 };
1965
1966 static int __init be_init_module(void)
1967 {
1968         if (rx_frag_size != 8192 && rx_frag_size != 4096
1969                 && rx_frag_size != 2048) {
1970                 printk(KERN_WARNING DRV_NAME
1971                         " : Module param rx_frag_size must be 2048/4096/8192."
1972                         " Using 2048\n");
1973                 rx_frag_size = 2048;
1974         }
1975         /* Ensure rx_frag_size is aligned to a cache line */
1976         if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
1977                 printk(KERN_WARNING DRV_NAME
1978                         " : Bad module param rx_frag_size. Using 2048\n");
1979                 rx_frag_size = 2048;
1980         }
1981
1982         return pci_register_driver(&be_driver);
1983 }
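/*
 * Editor's note: SKB_DATA_ALIGN(x) rounds x up to the skb-data
 * alignment granularity (SMP_CACHE_BYTES), so the second check above
 * asks "is rx_frag_size already cacheline-aligned?".  With 64-byte
 * cachelines, SKB_DATA_ALIGN(2048) == 2048 and passes, while
 * SKB_DATA_ALIGN(3000) == 3008 and would fail, though the first check
 * already restricts the value to 2048/4096/8192.
 */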
1984 module_init(be_init_module);
1985
1986 static void __exit be_exit_module(void)
1987 {
1988         pci_unregister_driver(&be_driver);
1989 }
1990 module_exit(be_exit_module);