/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

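/* Helpers to allocate/free the coherent DMA memory backing a BE queue */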
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

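/* Enable/disable the host interrupt bit in the membar control register; the
 * register is only rewritten when the current state actually has to change.
 */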
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;
	iowrite32(reg, addr);
}

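/* The helpers below ring the RQ/TXQ/EQ/CQ doorbells; each encodes the ring
 * id and a count (buffers posted or entries consumed) into one 32-bit write.
 */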
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

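/* ndo_set_mac_address: replace the programmed MAC by deleting the current
 * pmac entry and adding the new address on the same interface handle.
 */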
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

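/* Copy the port/rxf/erx counters from the last GET_STATS response into the
 * net_device_stats structure reported to the stack.
 */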
static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);

	return &adapter->stats.net_stats;
}

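/* Convert a byte count measured over 'ticks' jiffies into Mbits/sec */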
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else {
		*dummy = false;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

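/* Map the skb (linear part first, then each page frag) into WRBs on the tx
 * queue, fill the header WRB last and return the number of bytes mapped.
 */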
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
					frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

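/* ndo_start_xmit: map the skb into WRBs, remember it for completion time,
 * stop the queue if it cannot hold another max-fragmented skb, then ring
 * the tx doorbell.
 */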
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(adapter, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(adapter, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	if (netdev->flags & IFF_ALLMULTI) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev->mc_count);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

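/* Returns true if the stack must verify the checksum in software: either rx
 * csum offload (cso) is off or the hw did not validate the IP and L4
 * checksums of a TCP/UDP frame.
 */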
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;

	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);

	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

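/* Walk the tx queue from its tail to last_index, unmapping each data WRB,
 * and free the skb that was recorded at xmit time.
 */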
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

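/* Drain and ack all pending entries on an event queue; schedule NAPI if any
 * events were found.
 */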
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues
 */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		be_pci_func(adapter) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

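/* Periodic work: refresh hw stats, adapt the RX EQ delay, update tx/rx rates
 * and replenish the RX ring if a previous post failed for lack of memory.
 */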
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	status = be_cmd_get_stats(adapter, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}

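/* ndo_open: post rx buffers, enable NAPI and interrupts, arm the event and
 * completion queues, then query link state and kick off the worker.
 */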
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up);
	if (status)
		return status;
	be_link_status_update(adapter, link_up);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(adapter, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	return 0;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_get_stats = be_get_stats,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			pci_resource_len(adapter->pdev, 4));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -ENOMEM;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	return 0;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

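/* Bring the hw up: wait for POST completion, then fetch the fw version and
 * the port configuration used by the rest of init.
 */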
static int be_hw_up(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_POST(adapter);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		be_clear(adapter);
		rtnl_unlock();
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_setup(adapter);
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096
		&& rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);