2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
43 #include "firmware_exports.h"
47 #define SGE_RX_SM_BUF_SIZE 1536
49 #define SGE_RX_COPY_THRES 256
50 #define SGE_RX_PULL_LEN 128
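/* Packets up to SGE_RX_COPY_THRES bytes are copied into a fresh skb; for
 * larger packets taken from page-chunk free lists only the first
 * SGE_RX_PULL_LEN bytes are pulled into the skb head and the remainder is
 * attached as a page fragment.
 */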
53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
57 #define FL0_PG_CHUNK_SIZE 2048
58 #define FL0_PG_ORDER 0
59 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
60 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
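/* FL0 supplies small buffers for normal frames, FL1 supplies the larger
 * buffers used for jumbo frames.
 */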
62 #define SGE_RX_DROP_THRES 16
65 * Period of the Tx buffer reclaim timer. This timer does not need to run
66 * frequently as Tx buffers are usually reclaimed by new Tx packets.
68 #define TX_RECLAIM_PERIOD (HZ / 4)
70 /* WR size in bytes */
71 #define WR_LEN (WR_FLITS * 8)
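/* A flit is an 8-byte datum; descriptors, WRs and SGLs are sized in flits. */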
74 * Types of Tx queues in each queue set. Order here matters, do not change.
76 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
78 /* Values for sge_txq.flags */
80 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
81 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
85 __be64 flit[TX_DESC_FLITS];
95 struct tx_sw_desc { /* SW state per Tx descriptor */
97 u8 eop; /* set if last descriptor for packet */
98 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
99 u8 fragidx; /* first page fragment associated with descriptor */
100 s8 sflit; /* start flit of first SGL entry in descriptor */
103 struct rx_sw_desc { /* SW state per Rx descriptor */
106 struct fl_pg_chunk pg_chunk;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
120 * Holds unmapping information for Tx packets that need deferred unmapping.
121 * This structure lives at skb->head and must be allocated by callers.
123 struct deferred_unmap_info {
124 struct pci_dev *pdev;
125 dma_addr_t addr[MAX_SKB_FRAGS + 1];
129 * Maps a number of flits to the number of Tx descriptors that can hold them.
132 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
134 * HW allows up to 4 descriptors to be combined into a WR.
136 static u8 flit_desc_map[] = {
138 #if SGE_NUM_GENBITS == 1
139 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
140 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
141 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
142 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
143 #elif SGE_NUM_GENBITS == 2
144 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
145 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
146 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
147 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
149 # error "SGE_NUM_GENBITS must be 1 or 2"
153 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
155 return container_of(q, struct sge_qset, fl[qidx]);
158 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
160 return container_of(q, struct sge_qset, rspq);
163 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
165 return container_of(q, struct sge_qset, txq[qidx]);
169 * refill_rspq - replenish an SGE response queue
170 * @adapter: the adapter
171 * @q: the response queue to replenish
172 * @credits: how many new responses to make available
174 * Replenishes a response queue by making the supplied number of responses
177 static inline void refill_rspq(struct adapter *adapter,
178 const struct sge_rspq *q, unsigned int credits)
181 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
182 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
186 * need_skb_unmap - does the platform need unmapping of sk_buffs?
188 * Returns true if the platform needs sk_buff unmapping. The compiler
189 * optimizes away unnecessary code if this returns true.
191 static inline int need_skb_unmap(void)
194 * This structure is used to tell if the platform needs buffer
195 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
198 DECLARE_PCI_UNMAP_ADDR(addr);
201 return sizeof(struct dummy) != 0;
205 * unmap_skb - unmap a packet main body and its page fragments
207 * @q: the Tx queue containing Tx descriptors for the packet
208 * @cidx: index of Tx descriptor
209 * @pdev: the PCI device
211 * Unmap the main body of an sk_buff and its page fragments, if any.
212 * Because of the fairly complicated structure of our SGLs and the desire
213 * to conserve space for metadata, the information necessary to unmap an
214 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
215 * descriptors (the physical addresses of the various data buffers), and
216 * the SW descriptor state (assorted indices). The send functions
217 * initialize the indices for the first packet descriptor so we can unmap
218 * the buffers held in the first Tx descriptor here, and we have enough
219 * information at this point to set the state for the next Tx descriptor.
221 * Note that it is possible to clean up the first descriptor of a packet
222 * before the send routines have written the next descriptors, but this
223 * race does not cause any problem. We just end up writing the unmapping
224 * info for the descriptor first.
226 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
227 unsigned int cidx, struct pci_dev *pdev)
229 const struct sg_ent *sgp;
230 struct tx_sw_desc *d = &q->sdesc[cidx];
231 int nfrags, frag_idx, curflit, j = d->addr_idx;
233 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
234 frag_idx = d->fragidx;
236 if (frag_idx == 0 && skb_headlen(skb)) {
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
238 skb_headlen(skb), PCI_DMA_TODEVICE);
242 curflit = d->sflit + 1 + j;
243 nfrags = skb_shinfo(skb)->nr_frags;
245 while (frag_idx < nfrags && curflit < WR_FLITS) {
246 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
247 skb_shinfo(skb)->frags[frag_idx].size,
258 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
259 d = cidx + 1 == q->size ? q->sdesc : d + 1;
260 d->fragidx = frag_idx;
262 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
267 * free_tx_desc - reclaims Tx descriptors and their buffers
268 * @adapter: the adapter
269 * @q: the Tx queue to reclaim descriptors from
270 * @n: the number of descriptors to reclaim
272 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
273 * Tx buffers. Called with the Tx queue lock held.
275 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
278 struct tx_sw_desc *d;
279 struct pci_dev *pdev = adapter->pdev;
280 unsigned int cidx = q->cidx;
282 const int need_unmap = need_skb_unmap() &&
283 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
287 if (d->skb) { /* an SGL is present */
289 unmap_skb(d->skb, q, cidx, pdev);
294 if (++cidx == q->size) {
303 * reclaim_completed_tx - reclaims completed Tx descriptors
304 * @adapter: the adapter
305 * @q: the Tx queue to reclaim completed descriptors from
307 * Reclaims Tx descriptors that the SGE has indicated it has processed,
308 * and frees the associated buffers if possible. Called with the Tx
311 static inline void reclaim_completed_tx(struct adapter *adapter,
314 unsigned int reclaim = q->processed - q->cleaned;
317 free_tx_desc(adapter, q, reclaim);
318 q->cleaned += reclaim;
319 q->in_use -= reclaim;
324 * should_restart_tx - are there enough resources to restart a Tx queue?
327 * Checks if there are enough descriptors to restart a suspended Tx queue.
329 static inline int should_restart_tx(const struct sge_txq *q)
331 unsigned int r = q->processed - q->cleaned;
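/* Restart only if, not counting descriptors the HW has already completed
 * but we have not yet reclaimed, less than half of the ring is in use.
 */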
333 return q->in_use - r < (q->size >> 1);
337 * free_rx_bufs - free the Rx buffers on an SGE free list
338 * @pdev: the PCI device associated with the adapter
339 * @rxq: the SGE free list to clean up
341 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
342 * this queue should be stopped before calling this function.
344 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
346 unsigned int cidx = q->cidx;
348 while (q->credits--) {
349 struct rx_sw_desc *d = &q->sdesc[cidx];
351 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
352 q->buf_size, PCI_DMA_FROMDEVICE);
354 put_page(d->pg_chunk.page);
355 d->pg_chunk.page = NULL;
360 if (++cidx == q->size)
364 if (q->pg_chunk.page) {
365 __free_pages(q->pg_chunk.page, q->order);
366 q->pg_chunk.page = NULL;
371 * add_one_rx_buf - add a packet buffer to a free-buffer list
372 * @va: buffer start VA
373 * @len: the buffer length
374 * @d: the HW Rx descriptor to write
375 * @sd: the SW Rx descriptor to write
376 * @gen: the generation bit value
377 * @pdev: the PCI device associated with the adapter
379 * Add a buffer of the given length to the supplied HW and SW Rx
382 static inline int add_one_rx_buf(void *va, unsigned int len,
383 struct rx_desc *d, struct rx_sw_desc *sd,
384 unsigned int gen, struct pci_dev *pdev)
388 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
389 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
392 pci_unmap_addr_set(sd, dma_addr, mapping);
394 d->addr_lo = cpu_to_be32(mapping);
395 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
397 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
398 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
402 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
405 if (!q->pg_chunk.page) {
406 q->pg_chunk.page = alloc_pages(gfp, order);
407 if (unlikely(!q->pg_chunk.page))
409 q->pg_chunk.va = page_address(q->pg_chunk.page);
410 q->pg_chunk.offset = 0;
412 sd->pg_chunk = q->pg_chunk;
414 q->pg_chunk.offset += q->buf_size;
415 if (q->pg_chunk.offset == (PAGE_SIZE << order))
416 q->pg_chunk.page = NULL;
418 q->pg_chunk.va += q->buf_size;
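/* both the chunk just issued and the free list still reference this page,
 * so take an extra reference */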
419 get_page(q->pg_chunk.page);
425 * refill_fl - refill an SGE free-buffer list
426 * @adapter: the adapter
427 * @q: the free-list to refill
428 * @n: the number of new buffers to allocate
429 * @gfp: the gfp flags for allocating new buffers
431 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
432 * allocated with the supplied gfp flags. The caller must ensure that
433 * @n does not exceed the queue's capacity.
435 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
438 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
439 struct rx_desc *d = &q->desc[q->pidx];
440 unsigned int count = 0;
446 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
447 nomem: q->alloc_failed++;
450 buf_start = sd->pg_chunk.va;
452 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
458 buf_start = skb->data;
461 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
473 if (++q->pidx == q->size) {
484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
489 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
491 refill_fl(adap, fl, min(16U, fl->size - fl->credits),
492 GFP_ATOMIC | __GFP_COMP);
496 * recycle_rx_buf - recycle a receive buffer
497 * @adapter: the adapter
498 * @q: the SGE free list
499 * @idx: index of buffer to recycle
501 * Recycles the specified buffer on the given free list by adding it at
502 * the next available slot on the list.
504 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
507 struct rx_desc *from = &q->desc[idx];
508 struct rx_desc *to = &q->desc[q->pidx];
510 q->sdesc[q->pidx] = q->sdesc[idx];
511 to->addr_lo = from->addr_lo; /* already big endian */
512 to->addr_hi = from->addr_hi; /* likewise */
514 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
515 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
518 if (++q->pidx == q->size) {
522 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
526 * alloc_ring - allocate resources for an SGE descriptor ring
527 * @pdev: the PCI device
528 * @nelem: the number of descriptors
529 * @elem_size: the size of each descriptor
530 * @sw_size: the size of the SW state associated with each ring element
531 * @phys: the physical address of the allocated ring
532 * @metadata: address of the array holding the SW state for the ring
534 * Allocates resources for an SGE descriptor ring, such as Tx queues,
535 * free buffer lists, or response queues. Each SGE ring requires
536 * space for its HW descriptors plus, optionally, space for the SW state
537 * associated with each HW entry (the metadata). The function returns
538 * three values: the virtual address for the HW ring (the return value
539 * of the function), the physical address of the HW ring, and the address
542 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
543 size_t sw_size, dma_addr_t * phys, void *metadata)
545 size_t len = nelem * elem_size;
547 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
552 s = kcalloc(nelem, sw_size, GFP_KERNEL);
555 dma_free_coherent(&pdev->dev, len, p, *phys);
560 *(void **)metadata = s;
566 * t3_reset_qset - reset an SGE qset
569 * Reset the qset structure.
570 * The NAPI structure is preserved in the event of
571 * the qset's reincarnation, for example during EEH recovery.
573 static void t3_reset_qset(struct sge_qset *q)
576 !(q->adap->flags & NAPI_INIT)) {
577 memset(q, 0, sizeof(*q));
582 memset(&q->rspq, 0, sizeof(q->rspq));
583 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
584 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
586 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
587 kfree(q->lro_frag_tbl);
588 q->lro_nfrags = q->lro_frag_len = 0;
593 * t3_free_qset - free the resources of an SGE queue set
594 * @adapter: the adapter owning the queue set
597 * Release the HW and SW resources associated with an SGE queue set, such
598 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
599 * queue set must be quiesced prior to calling this.
601 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
604 struct pci_dev *pdev = adapter->pdev;
606 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
608 spin_lock_irq(&adapter->sge.reg_lock);
609 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
610 spin_unlock_irq(&adapter->sge.reg_lock);
611 free_rx_bufs(pdev, &q->fl[i]);
612 kfree(q->fl[i].sdesc);
613 dma_free_coherent(&pdev->dev,
615 sizeof(struct rx_desc), q->fl[i].desc,
619 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
620 if (q->txq[i].desc) {
621 spin_lock_irq(&adapter->sge.reg_lock);
622 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
623 spin_unlock_irq(&adapter->sge.reg_lock);
624 if (q->txq[i].sdesc) {
625 free_tx_desc(adapter, &q->txq[i],
627 kfree(q->txq[i].sdesc);
629 dma_free_coherent(&pdev->dev,
631 sizeof(struct tx_desc),
632 q->txq[i].desc, q->txq[i].phys_addr);
633 __skb_queue_purge(&q->txq[i].sendq);
637 spin_lock_irq(&adapter->sge.reg_lock);
638 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
639 spin_unlock_irq(&adapter->sge.reg_lock);
640 dma_free_coherent(&pdev->dev,
641 q->rspq.size * sizeof(struct rsp_desc),
642 q->rspq.desc, q->rspq.phys_addr);
649 * init_qset_cntxt - initialize an SGE queue set context info
651 * @id: the queue set id
653 * Initializes the TIDs and context ids for the queues of a queue set.
655 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
657 qs->rspq.cntxt_id = id;
658 qs->fl[0].cntxt_id = 2 * id;
659 qs->fl[1].cntxt_id = 2 * id + 1;
660 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
661 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
662 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
663 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
664 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
668 * sgl_len - calculates the size of an SGL of the given capacity
669 * @n: the number of SGL entries
671 * Calculates the number of flits needed for a scatter/gather list that
672 * can hold the given number of entries.
674 static inline unsigned int sgl_len(unsigned int n)
676 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
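/* each sg_ent (3 flits) holds two address/length pairs; a lone trailing
 * entry needs 2 flits, e.g. sgl_len(3) = 5 */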
677 return (3 * n) / 2 + (n & 1);
681 * flits_to_desc - returns the number of Tx descriptors for the given flits
682 * @n: the number of flits
684 * Calculates the number of Tx descriptors needed for the supplied number
687 static inline unsigned int flits_to_desc(unsigned int n)
689 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
690 return flit_desc_map[n];
694 * get_packet - return the next ingress packet buffer from a free list
695 * @adap: the adapter that received the packet
696 * @fl: the SGE free list holding the packet
697 * @len: the packet length including any SGE padding
698 * @drop_thres: # of remaining buffers before we start dropping packets
700 * Get the next packet from a free list and complete setup of the
701 * sk_buff. If the packet is small we make a copy and recycle the
702 * original buffer, otherwise we use the original buffer itself. If a
703 * positive drop threshold is supplied packets are dropped and their
704 * buffers recycled if (a) the number of remaining buffers is under the
705 * threshold and the packet is too big to copy, or (b) the packet should
706 * be copied but there is no memory for the copy.
708 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
709 unsigned int len, unsigned int drop_thres)
711 struct sk_buff *skb = NULL;
712 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
714 prefetch(sd->skb->data);
717 if (len <= SGE_RX_COPY_THRES) {
718 skb = alloc_skb(len, GFP_ATOMIC);
719 if (likely(skb != NULL)) {
721 pci_dma_sync_single_for_cpu(adap->pdev,
722 pci_unmap_addr(sd, dma_addr), len,
724 memcpy(skb->data, sd->skb->data, len);
725 pci_dma_sync_single_for_device(adap->pdev,
726 pci_unmap_addr(sd, dma_addr), len,
728 } else if (!drop_thres)
731 recycle_rx_buf(adap, fl, fl->cidx);
735 if (unlikely(fl->credits < drop_thres))
739 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
740 fl->buf_size, PCI_DMA_FROMDEVICE);
743 __refill_fl(adap, fl);
748 * get_packet_pg - return the next ingress packet buffer from a free list
749 * @adap: the adapter that received the packet
750 * @fl: the SGE free list holding the packet
751 * @len: the packet length including any SGE padding
752 * @drop_thres: # of remaining buffers before we start dropping packets
754 * Get the next packet from a free list populated with page chunks.
755 * If the packet is small we make a copy and recycle the original buffer,
756 * otherwise we attach the original buffer as a page fragment to a fresh
757 * sk_buff. If a positive drop threshold is supplied packets are dropped
758 * and their buffers recycled if (a) the number of remaining buffers is
759 * under the threshold and the packet is too big to copy, or (b) there's
762 * Note: this function is similar to @get_packet but deals with Rx buffers
763 * that are page chunks rather than sk_buffs.
765 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
766 struct sge_rspq *q, unsigned int len,
767 unsigned int drop_thres)
769 struct sk_buff *newskb, *skb;
770 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
772 newskb = skb = q->pg_skb;
774 if (!skb && (len <= SGE_RX_COPY_THRES)) {
775 newskb = alloc_skb(len, GFP_ATOMIC);
776 if (likely(newskb != NULL)) {
777 __skb_put(newskb, len);
778 pci_dma_sync_single_for_cpu(adap->pdev,
779 pci_unmap_addr(sd, dma_addr), len,
781 memcpy(newskb->data, sd->pg_chunk.va, len);
782 pci_dma_sync_single_for_device(adap->pdev,
783 pci_unmap_addr(sd, dma_addr), len,
785 } else if (!drop_thres)
789 recycle_rx_buf(adap, fl, fl->cidx);
794 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
798 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
799 if (unlikely(!newskb)) {
805 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
806 fl->buf_size, PCI_DMA_FROMDEVICE);
808 __skb_put(newskb, SGE_RX_PULL_LEN);
809 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
810 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
811 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
812 len - SGE_RX_PULL_LEN);
814 newskb->data_len = len - SGE_RX_PULL_LEN;
816 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
818 sd->pg_chunk.offset, len);
820 newskb->data_len += len;
822 newskb->truesize += newskb->data_len;
826 * We do not refill FLs here; we let the caller do it to overlap a
833 * get_imm_packet - return the next ingress packet buffer from a response
834 * @resp: the response descriptor containing the packet data
836 * Return a packet containing the immediate data of the given response.
838 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
840 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
843 __skb_put(skb, IMMED_PKT_SIZE);
844 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
850 * calc_tx_descs - calculate the number of Tx descriptors for a packet
853 * Returns the number of Tx descriptors needed for the given Ethernet
854 * packet. Ethernet packets require addition of WR and CPL headers.
856 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
860 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
863 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
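/* +1 SGL entry for the linear header data; the 2 extra flits hold the WR
 * and CPL_TX_PKT headers (16 bytes) */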
864 if (skb_shinfo(skb)->gso_size)
866 return flits_to_desc(flits);
870 * make_sgl - populate a scatter/gather list for a packet
872 * @sgp: the SGL to populate
873 * @start: start address of skb main body data to include in the SGL
874 * @len: length of skb main body data to include in the SGL
875 * @pdev: the PCI device
877 * Generates a scatter/gather list for the buffers that make up a packet
878 * and returns the SGL size in 8-byte words. The caller must size the SGL
881 static inline unsigned int make_sgl(const struct sk_buff *skb,
882 struct sg_ent *sgp, unsigned char *start,
883 unsigned int len, struct pci_dev *pdev)
886 unsigned int i, j = 0, nfrags;
889 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
890 sgp->len[0] = cpu_to_be32(len);
891 sgp->addr[0] = cpu_to_be64(mapping);
895 nfrags = skb_shinfo(skb)->nr_frags;
896 for (i = 0; i < nfrags; i++) {
897 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
899 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
900 frag->size, PCI_DMA_TODEVICE);
901 sgp->len[j] = cpu_to_be32(frag->size);
902 sgp->addr[j] = cpu_to_be64(mapping);
909 return ((nfrags + (len != 0)) * 3) / 2 + j;
913 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
917 * Ring the doorbell if a Tx queue is asleep. There is a natural race
918 * where the HW may go to sleep just after we checked; in that case
919 * the interrupt handler will detect the outstanding TX packet
920 * and ring the doorbell for us.
922 * When GTS is disabled we unconditionally ring the doorbell.
924 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
927 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
928 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
929 set_bit(TXQ_LAST_PKT_DB, &q->flags);
930 t3_write_reg(adap, A_SG_KDOORBELL,
931 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
934 wmb(); /* write descriptors before telling HW */
935 t3_write_reg(adap, A_SG_KDOORBELL,
936 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
940 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
942 #if SGE_NUM_GENBITS == 2
943 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
948 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
949 * @ndesc: number of Tx descriptors spanned by the SGL
950 * @skb: the packet corresponding to the WR
951 * @d: first Tx descriptor to be written
952 * @pidx: index of above descriptors
953 * @q: the SGE Tx queue
955 * @flits: number of flits to the start of the SGL in the first descriptor
956 * @sgl_flits: the SGL size in flits
957 * @gen: the Tx descriptor generation
958 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
959 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
961 * Write a work request header and an associated SGL. If the SGL is
962 * small enough to fit into one Tx descriptor it has already been written
963 * and we just need to write the WR header. Otherwise we distribute the
964 * SGL across the number of descriptors it spans.
966 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
967 struct tx_desc *d, unsigned int pidx,
968 const struct sge_txq *q,
969 const struct sg_ent *sgl,
970 unsigned int flits, unsigned int sgl_flits,
971 unsigned int gen, __be32 wr_hi,
974 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
975 struct tx_sw_desc *sd = &q->sdesc[pidx];
978 if (need_skb_unmap()) {
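/* record the SGL's position within the descriptor so unmap_skb() can
 * recover the DMA addresses when this descriptor is reclaimed */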
984 if (likely(ndesc == 1)) {
986 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
987 V_WR_SGLSFLT(flits)) | wr_hi;
989 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
990 V_WR_GEN(gen)) | wr_lo;
993 unsigned int ogen = gen;
994 const u64 *fp = (const u64 *)sgl;
995 struct work_request_hdr *wp = wrp;
997 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
998 V_WR_SGLSFLT(flits)) | wr_hi;
1001 unsigned int avail = WR_FLITS - flits;
1003 if (avail > sgl_flits)
1005 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1015 if (++pidx == q->size) {
1023 wrp = (struct work_request_hdr *)d;
1024 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1025 V_WR_SGLSFLT(1)) | wr_hi;
1026 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1028 V_WR_GEN(gen)) | wr_lo;
1033 wrp->wr_hi |= htonl(F_WR_EOP);
1035 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1036 wr_gen2((struct tx_desc *)wp, ogen);
1037 WARN_ON(ndesc != 0);
1042 * write_tx_pkt_wr - write a TX_PKT work request
1043 * @adap: the adapter
1044 * @skb: the packet to send
1045 * @pi: the egress interface
1046 * @pidx: index of the first Tx descriptor to write
1047 * @gen: the generation value to use
1049 * @ndesc: number of descriptors the packet will occupy
1050 * @compl: the value of the COMPL bit to use
1052 * Generate a TX_PKT work request to send the supplied packet.
1054 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1055 const struct port_info *pi,
1056 unsigned int pidx, unsigned int gen,
1057 struct sge_txq *q, unsigned int ndesc,
1060 unsigned int flits, sgl_flits, cntrl, tso_info;
1061 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1062 struct tx_desc *d = &q->desc[pidx];
1063 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1065 cpl->len = htonl(skb->len | 0x80000000);
1066 cntrl = V_TXPKT_INTF(pi->port_id);
1068 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1069 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1071 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1074 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1077 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1078 hdr->cntrl = htonl(cntrl);
1079 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1080 CPL_ETH_II : CPL_ETH_II_VLAN;
1081 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1082 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1083 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1084 hdr->lso_info = htonl(tso_info);
1087 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1088 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1089 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1090 cpl->cntrl = htonl(cntrl);
1092 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1093 q->sdesc[pidx].skb = NULL;
1095 skb_copy_from_linear_data(skb, &d->flit[2],
1098 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1100 flits = (skb->len + 7) / 8 + 2;
1101 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1102 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1103 | F_WR_SOP | F_WR_EOP | compl);
1105 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1106 V_WR_TID(q->token));
1115 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1116 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1118 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1119 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1120 htonl(V_WR_TID(q->token)));
1123 static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1126 netif_stop_queue(dev);
1127 set_bit(TXQ_ETH, &qs->txq_stopped);
1132 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1134 * @dev: the egress net device
1136 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1138 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1140 unsigned int ndesc, pidx, credits, gen, compl;
1141 const struct port_info *pi = netdev_priv(dev);
1142 struct adapter *adap = pi->adapter;
1143 struct sge_qset *qs = pi->qs;
1144 struct sge_txq *q = &qs->txq[TXQ_ETH];
1147 * The chip's minimum packet length is 9 octets, but we play it safe and
1148 * reject anything shorter than an Ethernet header.
1150 if (unlikely(skb->len < ETH_HLEN)) {
1152 return NETDEV_TX_OK;
1155 spin_lock(&q->lock);
1156 reclaim_completed_tx(adap, q);
1158 credits = q->size - q->in_use;
1159 ndesc = calc_tx_descs(skb);
1161 if (unlikely(credits < ndesc)) {
1162 t3_stop_queue(dev, qs, q);
1163 dev_err(&adap->pdev->dev,
1164 "%s: Tx ring %u full while queue awake!\n",
1165 dev->name, q->cntxt_id & 7);
1166 spin_unlock(&q->lock);
1167 return NETDEV_TX_BUSY;
1171 if (unlikely(credits - ndesc < q->stop_thres)) {
1172 t3_stop_queue(dev, qs, q);
1174 if (should_restart_tx(q) &&
1175 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1177 netif_wake_queue(dev);
1182 q->unacked += ndesc;
1183 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
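/* request a WR completion roughly once every 8 descriptors so the SGE
 * returns Tx credits and q->processed keeps advancing */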
1187 if (q->pidx >= q->size) {
1192 /* update port statistics */
1193 if (skb->ip_summed == CHECKSUM_COMPLETE)
1194 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1195 if (skb_shinfo(skb)->gso_size)
1196 qs->port_stats[SGE_PSTAT_TSO]++;
1197 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1198 qs->port_stats[SGE_PSTAT_VLANINS]++;
1200 dev->trans_start = jiffies;
1201 spin_unlock(&q->lock);
1204 * We do not use Tx completion interrupts to free DMAd Tx packets.
1205 * This is good for performance but means that we rely on new Tx
1206 * packets arriving to run the destructors of completed packets,
1207 * which open up space in their sockets' send queues. Sometimes
1208 * we do not get such new packets causing Tx to stall. A single
1209 * UDP transmitter is a good example of this situation. We have
1210 * a clean up timer that periodically reclaims completed packets
1211 * but it doesn't run often enough (nor do we want it to) to prevent
1212 * lengthy stalls. A solution to this problem is to run the
1213 * destructor early, after the packet is queued but before it's DMAd.
1214 * A downside is that we lie to socket memory accounting, but the amount
1215 * of extra memory is reasonable (limited by the number of Tx
1216 * descriptors), the packets do actually get freed quickly by new
1217 * packets almost always, and for protocols like TCP that wait for
1218 * acks to really free up the data the extra memory is even less.
1219 * On the positive side we run the destructors on the sending CPU
1220 * rather than on a potentially different completing CPU, usually a
1221 * good thing. We also run them without holding our Tx queue lock,
1222 * unlike what reclaim_completed_tx() would otherwise do.
1224 * Run the destructor before telling the DMA engine about the packet
1225 * to make sure it doesn't complete and get freed prematurely.
1227 if (likely(!skb_shared(skb)))
1230 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1231 check_ring_tx_db(adap, q);
1232 return NETDEV_TX_OK;
1236 * write_imm - write a packet into a Tx descriptor as immediate data
1237 * @d: the Tx descriptor to write
1239 * @len: the length of packet data to write as immediate data
1240 * @gen: the generation bit value to write
1242 * Writes a packet as immediate data into a Tx descriptor. The packet
1243 * contains a work request at its beginning. We must write the packet
1244 * carefully so the SGE doesn't read it accidentally before it's written
1247 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1248 unsigned int len, unsigned int gen)
1250 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1251 struct work_request_hdr *to = (struct work_request_hdr *)d;
1253 if (likely(!skb->data_len))
1254 memcpy(&to[1], &from[1], len - sizeof(*from));
1256 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1258 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1259 V_WR_BCNTLFLT(len & 7));
1261 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1262 V_WR_LEN((len + 7) / 8));
1268 * check_desc_avail - check descriptor availability on a send queue
1269 * @adap: the adapter
1270 * @q: the send queue
1271 * @skb: the packet needing the descriptors
1272 * @ndesc: the number of Tx descriptors needed
1273 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1275 * Checks if the requested number of Tx descriptors is available on an
1276 * SGE send queue. If the queue is already suspended or not enough
1277 * descriptors are available the packet is queued for later transmission.
1278 * Must be called with the Tx queue locked.
1280 * Returns 0 if enough descriptors are available, 1 if there aren't
1281 * enough descriptors and the packet has been queued, and 2 if the caller
1282 * needs to retry because there weren't enough descriptors at the
1283 * beginning of the call but some freed up in the mean time.
1285 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1286 struct sk_buff *skb, unsigned int ndesc,
1289 if (unlikely(!skb_queue_empty(&q->sendq))) {
1290 addq_exit:__skb_queue_tail(&q->sendq, skb);
1293 if (unlikely(q->size - q->in_use < ndesc)) {
1294 struct sge_qset *qs = txq_to_qset(q, qid);
1296 set_bit(qid, &qs->txq_stopped);
1297 smp_mb__after_clear_bit();
1299 if (should_restart_tx(q) &&
1300 test_and_clear_bit(qid, &qs->txq_stopped))
1310 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1311 * @q: the SGE control Tx queue
1313 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1314 * that send only immediate data (presently just the control queues) and
1315 * thus do not have any sk_buffs to release.
1317 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1319 unsigned int reclaim = q->processed - q->cleaned;
1321 q->in_use -= reclaim;
1322 q->cleaned += reclaim;
1325 static inline int immediate(const struct sk_buff *skb)
1327 return skb->len <= WR_LEN;
1331 * ctrl_xmit - send a packet through an SGE control Tx queue
1332 * @adap: the adapter
1333 * @q: the control queue
1336 * Send a packet through an SGE control Tx queue. Packets sent through
1337 * a control queue must fit entirely as immediate data in a single Tx
1338 * descriptor and have no page fragments.
1340 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1341 struct sk_buff *skb)
1344 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1346 if (unlikely(!immediate(skb))) {
1349 return NET_XMIT_SUCCESS;
1352 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1353 wrp->wr_lo = htonl(V_WR_TID(q->token));
1355 spin_lock(&q->lock);
1356 again:reclaim_completed_tx_imm(q);
1358 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1359 if (unlikely(ret)) {
1361 spin_unlock(&q->lock);
1367 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1370 if (++q->pidx >= q->size) {
1374 spin_unlock(&q->lock);
1376 t3_write_reg(adap, A_SG_KDOORBELL,
1377 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1378 return NET_XMIT_SUCCESS;
1382 * restart_ctrlq - restart a suspended control queue
1383 * @qs: the queue set containing the control queue
1385 * Resumes transmission on a suspended Tx control queue.
1387 static void restart_ctrlq(unsigned long data)
1389 struct sk_buff *skb;
1390 struct sge_qset *qs = (struct sge_qset *)data;
1391 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1393 spin_lock(&q->lock);
1394 again:reclaim_completed_tx_imm(q);
1396 while (q->in_use < q->size &&
1397 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1399 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1401 if (++q->pidx >= q->size) {
1408 if (!skb_queue_empty(&q->sendq)) {
1409 set_bit(TXQ_CTRL, &qs->txq_stopped);
1410 smp_mb__after_clear_bit();
1412 if (should_restart_tx(q) &&
1413 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1418 spin_unlock(&q->lock);
1420 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1421 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1425 * Send a management message through control queue 0
1427 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1431 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1438 * deferred_unmap_destructor - unmap a packet when it is freed
1441 * This is the packet destructor used for Tx packets that need to remain
1442 * mapped until they are freed rather than until their Tx descriptors are
1445 static void deferred_unmap_destructor(struct sk_buff *skb)
1448 const dma_addr_t *p;
1449 const struct skb_shared_info *si;
1450 const struct deferred_unmap_info *dui;
1452 dui = (struct deferred_unmap_info *)skb->head;
1455 if (skb->tail - skb->transport_header)
1456 pci_unmap_single(dui->pdev, *p++,
1457 skb->tail - skb->transport_header,
1460 si = skb_shinfo(skb);
1461 for (i = 0; i < si->nr_frags; i++)
1462 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1466 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1467 const struct sg_ent *sgl, int sgl_flits)
1470 struct deferred_unmap_info *dui;
1472 dui = (struct deferred_unmap_info *)skb->head;
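/* stash the DMA addresses from the SGL at skb->head so they are still
 * available when the destructor runs after the skb has been sent */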
1474 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1475 *p++ = be64_to_cpu(sgl->addr[0]);
1476 *p++ = be64_to_cpu(sgl->addr[1]);
1479 *p = be64_to_cpu(sgl->addr[0]);
1483 * write_ofld_wr - write an offload work request
1484 * @adap: the adapter
1485 * @skb: the packet to send
1487 * @pidx: index of the first Tx descriptor to write
1488 * @gen: the generation value to use
1489 * @ndesc: number of descriptors the packet will occupy
1491 * Write an offload work request to send the supplied packet. The packet
1492 * data already carry the work request with most fields populated.
1494 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1495 struct sge_txq *q, unsigned int pidx,
1496 unsigned int gen, unsigned int ndesc)
1498 unsigned int sgl_flits, flits;
1499 struct work_request_hdr *from;
1500 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1501 struct tx_desc *d = &q->desc[pidx];
1503 if (immediate(skb)) {
1504 q->sdesc[pidx].skb = NULL;
1505 write_imm(d, skb, skb->len, gen);
1509 /* Only TX_DATA builds SGLs */
1511 from = (struct work_request_hdr *)skb->data;
1512 memcpy(&d->flit[1], &from[1],
1513 skb_transport_offset(skb) - sizeof(*from));
1515 flits = skb_transport_offset(skb) / 8;
1516 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1517 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1518 skb->tail - skb->transport_header,
1520 if (need_skb_unmap()) {
1521 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1522 skb->destructor = deferred_unmap_destructor;
1525 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1526 gen, from->wr_hi, from->wr_lo);
1530 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1533 * Returns the number of Tx descriptors needed for the given offload
1534 * packet. These packets are already fully constructed.
1536 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1538 unsigned int flits, cnt;
1540 if (skb->len <= WR_LEN)
1541 return 1; /* packet fits as immediate data */
1543 flits = skb_transport_offset(skb) / 8; /* headers */
1544 cnt = skb_shinfo(skb)->nr_frags;
1545 if (skb->tail != skb->transport_header)
1547 return flits_to_desc(flits + sgl_len(cnt));
1551 * ofld_xmit - send a packet through an offload queue
1552 * @adap: the adapter
1553 * @q: the Tx offload queue
1556 * Send an offload packet through an SGE offload queue.
1558 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1559 struct sk_buff *skb)
1562 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1564 spin_lock(&q->lock);
1565 again:reclaim_completed_tx(adap, q);
1567 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1568 if (unlikely(ret)) {
1570 skb->priority = ndesc; /* save for restart */
1571 spin_unlock(&q->lock);
1581 if (q->pidx >= q->size) {
1585 spin_unlock(&q->lock);
1587 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1588 check_ring_tx_db(adap, q);
1589 return NET_XMIT_SUCCESS;
1593 * restart_offloadq - restart a suspended offload queue
1594 * @qs: the queue set containing the offload queue
1596 * Resumes transmission on a suspended Tx offload queue.
1598 static void restart_offloadq(unsigned long data)
1600 struct sk_buff *skb;
1601 struct sge_qset *qs = (struct sge_qset *)data;
1602 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1603 const struct port_info *pi = netdev_priv(qs->netdev);
1604 struct adapter *adap = pi->adapter;
1606 spin_lock(&q->lock);
1607 again:reclaim_completed_tx(adap, q);
1609 while ((skb = skb_peek(&q->sendq)) != NULL) {
1610 unsigned int gen, pidx;
1611 unsigned int ndesc = skb->priority;
1613 if (unlikely(q->size - q->in_use < ndesc)) {
1614 set_bit(TXQ_OFLD, &qs->txq_stopped);
1615 smp_mb__after_clear_bit();
1617 if (should_restart_tx(q) &&
1618 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1628 if (q->pidx >= q->size) {
1632 __skb_unlink(skb, &q->sendq);
1633 spin_unlock(&q->lock);
1635 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1636 spin_lock(&q->lock);
1638 spin_unlock(&q->lock);
1641 set_bit(TXQ_RUNNING, &q->flags);
1642 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1645 t3_write_reg(adap, A_SG_KDOORBELL,
1646 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1650 * queue_set - return the queue set a packet should use
1653 * Maps a packet to the SGE queue set it should use. The desired queue
1654 * set is carried in bits 1-3 in the packet's priority.
1656 static inline int queue_set(const struct sk_buff *skb)
1658 return skb->priority >> 1;
1662 * is_ctrl_pkt - return whether an offload packet is a control packet
1665 * Determines whether an offload packet should use an OFLD or a CTRL
1666 * Tx queue. This is indicated by bit 0 in the packet's priority.
1668 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1670 return skb->priority & 1;
1674 * t3_offload_tx - send an offload packet
1675 * @tdev: the offload device to send to
1678 * Sends an offload packet. We use the packet priority to select the
1679 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1680 * should be sent as regular or control, bits 1-3 select the queue set.
1682 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1684 struct adapter *adap = tdev2adap(tdev);
1685 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1687 if (unlikely(is_ctrl_pkt(skb)))
1688 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1690 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1694 * offload_enqueue - add an offload packet to an SGE offload receive queue
1695 * @q: the SGE response queue
1698 * Add a new offload packet to an SGE response queue's offload packet
1699 * queue. If the packet is the first on the queue it schedules the RX
1700 * softirq to process the queue.
1702 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1704 int was_empty = skb_queue_empty(&q->rx_queue);
1706 __skb_queue_tail(&q->rx_queue, skb);
1709 struct sge_qset *qs = rspq_to_qset(q);
1711 napi_schedule(&qs->napi);
1716 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1717 * @tdev: the offload device that will be receiving the packets
1718 * @q: the SGE response queue that assembled the bundle
1719 * @skbs: the partial bundle
1720 * @n: the number of packets in the bundle
1722 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1724 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1726 struct sk_buff *skbs[], int n)
1729 q->offload_bundles++;
1730 tdev->recv(tdev, skbs, n);
1735 * ofld_poll - NAPI handler for offload packets in interrupt mode
1736 * @dev: the network device doing the polling
1737 * @budget: polling budget
1739 * The NAPI handler for offload packets when a response queue is serviced
1740 * by the hard interrupt handler, i.e., when it's operating in non-polling
1741 * mode. Creates small packet batches and sends them through the offload
1742 * receive handler. Batches need to be of modest size as we do prefetches
1743 * on the packets in each.
1745 static int ofld_poll(struct napi_struct *napi, int budget)
1747 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1748 struct sge_rspq *q = &qs->rspq;
1749 struct adapter *adapter = qs->adap;
1752 while (work_done < budget) {
1753 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1754 struct sk_buff_head queue;
1757 spin_lock_irq(&q->lock);
1758 __skb_queue_head_init(&queue);
1759 skb_queue_splice_init(&q->rx_queue, &queue);
1760 if (skb_queue_empty(&queue)) {
1761 napi_complete(napi);
1762 spin_unlock_irq(&q->lock);
1765 spin_unlock_irq(&q->lock);
1768 skb_queue_walk_safe(&queue, skb, tmp) {
1769 if (work_done >= budget)
1773 __skb_unlink(skb, &queue);
1774 prefetch(skb->data);
1775 skbs[ngathered] = skb;
1776 if (++ngathered == RX_BUNDLE_SIZE) {
1777 q->offload_bundles++;
1778 adapter->tdev.recv(&adapter->tdev, skbs,
1783 if (!skb_queue_empty(&queue)) {
1784 /* splice remaining packets back onto Rx queue */
1785 spin_lock_irq(&q->lock);
1786 skb_queue_splice(&queue, &q->rx_queue);
1787 spin_unlock_irq(&q->lock);
1789 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1796 * rx_offload - process a received offload packet
1797 * @tdev: the offload device receiving the packet
1798 * @rq: the response queue that received the packet
1800 * @rx_gather: a gather list of packets if we are building a bundle
1801 * @gather_idx: index of the next available slot in the bundle
1803 * Process an ingress offload packet and add it to the offload ingress
1804 * queue. Returns the index of the next available slot in the bundle.
1806 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1807 struct sk_buff *skb, struct sk_buff *rx_gather[],
1808 unsigned int gather_idx)
1810 skb_reset_mac_header(skb);
1811 skb_reset_network_header(skb);
1812 skb_reset_transport_header(skb);
1815 rx_gather[gather_idx++] = skb;
1816 if (gather_idx == RX_BUNDLE_SIZE) {
1817 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1819 rq->offload_bundles++;
1822 offload_enqueue(rq, skb);
1828 * restart_tx - check whether to restart suspended Tx queues
1829 * @qs: the queue set to resume
1831 * Restarts suspended Tx queues of an SGE queue set if they have enough
1832 * free resources to resume operation.
1834 static void restart_tx(struct sge_qset *qs)
1836 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1837 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1838 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1839 qs->txq[TXQ_ETH].restarts++;
1840 if (netif_running(qs->netdev))
1841 netif_wake_queue(qs->netdev);
1844 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1845 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1846 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1847 qs->txq[TXQ_OFLD].restarts++;
1848 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1850 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1851 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1852 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1853 qs->txq[TXQ_CTRL].restarts++;
1854 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1859 * rx_eth - process an ingress Ethernet packet
1860 * @adap: the adapter
1861 * @rq: the response queue that received the packet
1863 * @pad: amount of padding at the start of the buffer
1865 * Process an ingress Ethernet packet and deliver it to the stack.
1866 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1867 * if it was immediate data in a response.
1869 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1870 struct sk_buff *skb, int pad, int lro)
1872 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1873 struct sge_qset *qs = rspq_to_qset(rq);
1874 struct port_info *pi;
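/* strip the CPL_RX_PKT header and any SGE padding before giving the
 * frame to the stack */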
1876 skb_pull(skb, sizeof(*p) + pad);
1877 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1878 skb->dev->last_rx = jiffies;
1879 pi = netdev_priv(skb->dev);
1880 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
1882 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1883 skb->ip_summed = CHECKSUM_UNNECESSARY;
1885 skb->ip_summed = CHECKSUM_NONE;
1887 if (unlikely(p->vlan_valid)) {
1888 struct vlan_group *grp = pi->vlan_grp;
1890 qs->port_stats[SGE_PSTAT_VLANEX]++;
1893 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1898 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1901 dev_kfree_skb_any(skb);
1902 } else if (rq->polling) {
1904 lro_receive_skb(&qs->lro_mgr, skb, p);
1906 netif_receive_skb(skb);
1911 static inline int is_eth_tcp(u32 rss)
1913 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1917 * lro_frame_ok - check if an ingress packet is eligible for LRO
1918 * @p: the CPL header of the packet
1920 * Returns true if a received packet is eligible for LRO.
1921 * The following conditions must be true:
1922 * - packet is TCP/IP Ethernet II (checked elsewhere)
1923 * - not an IP fragment
1925 * - TCP/IP checksums are correct
1926 * - the packet is for this host
1928 static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1930 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1931 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1933 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1934 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1937 static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1938 u64 *hdr_flags, void *priv)
1940 const struct cpl_rx_pkt *cpl = priv;
1942 if (!lro_frame_ok(cpl))
1945 *eh = (struct ethhdr *)(cpl + 1);
1946 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1947 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1949 *hdr_flags = LRO_IPV4 | LRO_TCP;
1953 static int t3_get_skb_header(struct sk_buff *skb,
1954 void **iph, void **tcph, u64 *hdr_flags,
1959 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1962 static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
1963 void **iph, void **tcph, u64 *hdr_flags,
1966 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
1970 * lro_add_page - add a page chunk to an LRO session
1971 * @adap: the adapter
1972 * @qs: the associated queue set
1973 * @fl: the free list containing the page chunk to add
1974 * @len: packet length
1975 * @complete: Indicates the last fragment of a frame
1977 * Add a received packet contained in a page chunk to an existing LRO
1980 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
1981 struct sge_fl *fl, int len, int complete)
1983 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1984 struct cpl_rx_pkt *cpl;
1985 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
1986 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
1990 offset = 2 + sizeof(struct cpl_rx_pkt);
1991 qs->lro_va = cpl = sd->pg_chunk.va + 2;
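/* the extra 2 bytes skip the SGE Rx padding that precedes the CPL header */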
1997 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1998 fl->buf_size, PCI_DMA_FROMDEVICE);
2000 rx_frag += nr_frags;
2001 rx_frag->page = sd->pg_chunk.page;
2002 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2003 rx_frag->size = len;
2006 qs->lro_frag_len = frag_len;
2011 qs->lro_nfrags = qs->lro_frag_len = 0;
2014 if (unlikely(cpl->vlan_valid)) {
2015 struct net_device *dev = qs->netdev;
2016 struct port_info *pi = netdev_priv(dev);
2017 struct vlan_group *grp = pi->vlan_grp;
2019 if (likely(grp != NULL)) {
2020 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2023 grp, ntohs(cpl->vlan),
2028 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2029 frag_len, frag_len, cpl, 0);
2033 * init_lro_mgr - initialize an LRO manager object
2034 * @lro_mgr: the LRO manager object
2036 static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2038 lro_mgr->dev = qs->netdev;
2039 lro_mgr->features = LRO_F_NAPI;
2040 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2041 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2042 lro_mgr->max_desc = T3_MAX_LRO_SES;
2043 lro_mgr->lro_arr = qs->lro_desc;
2044 lro_mgr->get_frag_header = t3_get_frag_header;
2045 lro_mgr->get_skb_header = t3_get_skb_header;
2046 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2047 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2048 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2052 * handle_rsp_cntrl_info - handles control information in a response
2053 * @qs: the queue set corresponding to the response
2054 * @flags: the response control flags
2056 * Handles the control information of an SGE response, such as GTS
2057 * indications and completion credits for the queue set's Tx queues.
2058 * HW coalesces credits, we don't do any extra SW coalescing.
2060 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2062 unsigned int credits;
2065 if (flags & F_RSPD_TXQ0_GTS)
2066 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2069 credits = G_RSPD_TXQ0_CR(flags);
2071 qs->txq[TXQ_ETH].processed += credits;
2073 credits = G_RSPD_TXQ2_CR(flags);
2075 qs->txq[TXQ_CTRL].processed += credits;
2078 if (flags & F_RSPD_TXQ1_GTS)
2079 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2081 credits = G_RSPD_TXQ1_CR(flags);
2083 qs->txq[TXQ_OFLD].processed += credits;
2087 * check_ring_db - check if we need to ring any doorbells
2088 * @adapter: the adapter
2089 * @qs: the queue set whose Tx queues are to be examined
2090 * @sleeping: indicates which Tx queue sent GTS
2092 * Checks if some of a queue set's Tx queues need to ring their doorbells
2093 * to resume transmission after idling while they still have unprocessed
2096 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2097 unsigned int sleeping)
2099 if (sleeping & F_RSPD_TXQ0_GTS) {
2100 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2102 if (txq->cleaned + txq->in_use != txq->processed &&
2103 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2104 set_bit(TXQ_RUNNING, &txq->flags);
2105 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2106 V_EGRCNTX(txq->cntxt_id));
2110 if (sleeping & F_RSPD_TXQ1_GTS) {
2111 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2113 if (txq->cleaned + txq->in_use != txq->processed &&
2114 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2115 set_bit(TXQ_RUNNING, &txq->flags);
2116 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2117 V_EGRCNTX(txq->cntxt_id));
2123 * is_new_response - check if a response is newly written
2124 * @r: the response descriptor
2125 * @q: the response queue
2127 * Returns true if a response descriptor contains a yet unprocessed
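 * response. The SGE flips the generation bit each time it wraps the ring,
 * so a response is new when its generation bit matches q->gen.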
2130 static inline int is_new_response(const struct rsp_desc *r,
2131 const struct sge_rspq *q)
2133 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2136 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2139 q->rx_recycle_buf = 0;
2142 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2143 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2144 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2145 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2146 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2148 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2149 #define NOMEM_INTR_DELAY 2500
2152 * process_responses - process responses from an SGE response queue
2153 * @adap: the adapter
2154 * @qs: the queue set to which the response queue belongs
2155 * @budget: how many responses can be processed in this round
2157 * Process responses from an SGE response queue up to the supplied budget.
2158 * Responses include received packets as well as credits and other events
2159 * for the queues that belong to the response queue's queue set.
2160 * A negative budget is effectively unlimited.
2162 * Additionally choose the interrupt holdoff time for the next interrupt
2163 * on this queue. If the system is under memory shortage use a fairly
2164 * long delay to help recovery.
2166 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2169 struct sge_rspq *q = &qs->rspq;
2170 struct rsp_desc *r = &q->desc[q->cidx];
2171 int budget_left = budget;
2172 unsigned int sleeping = 0;
2173 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2176 q->next_holdoff = q->holdoff_tmr;
2178 while (likely(budget_left && is_new_response(r, q))) {
2179 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2180 struct sk_buff *skb = NULL;
2181 u32 len, flags = ntohl(r->flags);
2182 __be32 rss_hi = *(const __be32 *)r,
2183 rss_lo = r->rss_hdr.rss_hash_val;
2185 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2187 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2188 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2192 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2193 skb->data[0] = CPL_ASYNC_NOTIF;
2194 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2196 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2197 skb = get_imm_packet(r);
2198 if (unlikely(!skb)) {
2200 q->next_holdoff = NOMEM_INTR_DELAY;
2202 /* consume one credit since we tried */
2208 } else if ((len = ntohl(r->len_cq)) != 0) {
2212 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2214 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2215 if (fl->use_pages) {
2216 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2219 #if L1_CACHE_BYTES < 128
2220 prefetch(addr + L1_CACHE_BYTES);
2222 __refill_fl(adap, fl);
2224 lro_add_page(adap, qs, fl,
2226 flags & F_RSPD_EOP);
2230 skb = get_packet_pg(adap, fl, q,
2233 SGE_RX_DROP_THRES : 0);
2236 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2237 eth ? SGE_RX_DROP_THRES : 0);
2238 if (unlikely(!skb)) {
2242 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2245 if (++fl->cidx == fl->size)
2250 if (flags & RSPD_CTRL_MASK) {
2251 sleeping |= flags & RSPD_GTS_MASK;
2252 handle_rsp_cntrl_info(qs, flags);
2256 if (unlikely(++q->cidx == q->size)) {
2263 if (++q->credits >= (q->size / 4)) {
2264 refill_rspq(adap, q, q->credits);
2268 packet_complete = flags &
2269 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2270 F_RSPD_ASYNC_NOTIF);
2272 if (skb != NULL && packet_complete) {
2274 rx_eth(adap, q, skb, ethpad, lro);
2277 /* Preserve the RSS info in csum & priority */
2279 skb->priority = rss_lo;
2280 ngathered = rx_offload(&adap->tdev, q, skb,
2285 if (flags & F_RSPD_EOP)
2286 clear_rspq_bufstate(q);
2291 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2292 lro_flush_all(&qs->lro_mgr);
2293 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2294 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2295 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2298 check_ring_db(adap, qs, sleeping);
2300 smp_mb(); /* commit Tx queue .processed updates */
2301 if (unlikely(qs->txq_stopped != 0))
2304 budget -= budget_left;
2308 static inline int is_pure_response(const struct rsp_desc *r)
2310 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2312 return (n | r->len_cq) == 0;
2316 * napi_rx_handler - the NAPI handler for Rx processing
2317 * @napi: the napi instance
2318 * @budget: how many packets we can process in this round
2320 * Handler for new data events when using NAPI.
2322 static int napi_rx_handler(struct napi_struct *napi, int budget)
2324 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2325 struct adapter *adap = qs->adap;
2326 int work_done = process_responses(adap, qs, budget);
2328 if (likely(work_done < budget)) {
2329 napi_complete(napi);
2332 * Because we don't atomically flush the following
2333 * write it is possible that in very rare cases it can
2334 * reach the device in a way that races with a new
2335 * response being written plus an error interrupt
2336 * causing the NAPI interrupt handler below to return
2337 * unhandled status to the OS. To protect against
2338 * this would require flushing the write and doing
2339 * both the write and the flush with interrupts off.
2340 * Way too expensive and unjustifiable given the
2341 * rarity of the race.
2343 * The race cannot happen at all with MSI-X.
2345 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2346 V_NEWTIMER(qs->rspq.next_holdoff) |
2347 V_NEWINDEX(qs->rspq.cidx));
2353 * Returns true if the device is already scheduled for polling.
2355 static inline int napi_is_scheduled(struct napi_struct *napi)
2357 return test_bit(NAPI_STATE_SCHED, &napi->state);
2361 * process_pure_responses - process pure responses from a response queue
2362 * @adap: the adapter
2363 * @qs: the queue set owning the response queue
2364 * @r: the first pure response to process
2366 * A simpler version of process_responses() that handles only pure (i.e.,
2367 * non data-carrying) responses. Such responses are too light-weight to
2368 * justify calling a softirq under NAPI, so we handle them specially in
2369 * the interrupt handler. The function is called with a pointer to a
2370 * response, which the caller must ensure is a valid pure response.
2372 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2374 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2377 struct sge_rspq *q = &qs->rspq;
2378 unsigned int sleeping = 0;
2381 u32 flags = ntohl(r->flags);
2384 if (unlikely(++q->cidx == q->size)) {
2391 if (flags & RSPD_CTRL_MASK) {
2392 sleeping |= flags & RSPD_GTS_MASK;
2393 handle_rsp_cntrl_info(qs, flags);
2397 if (++q->credits >= (q->size / 4)) {
2398 refill_rspq(adap, q, q->credits);
2401 } while (is_new_response(r, q) && is_pure_response(r));
2404 check_ring_db(adap, qs, sleeping);
2406 smp_mb(); /* commit Tx queue .processed updates */
2407 if (unlikely(qs->txq_stopped != 0))
2410 return is_new_response(r, q);
2414 * handle_responses - decide what to do with new responses in NAPI mode
2415 * @adap: the adapter
2416 * @q: the response queue
2418 * This is used by the NAPI interrupt handlers to decide what to do with
2419 * new SGE responses. If there are no new responses it returns -1. If
2420 * there are new responses and they are pure (i.e., non-data carrying)
2421 * it handles them straight in hard interrupt context as they are very
2422 * cheap and don't deliver any packets. Finally, if there are any data
2423 * signaling responses it schedules the NAPI handler. Returns 1 if it
2424 * schedules NAPI, 0 if all new responses were pure.
2426 * The caller must ascertain NAPI is not already running.
2428 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2430 struct sge_qset *qs = rspq_to_qset(q);
2431 struct rsp_desc *r = &q->desc[q->cidx];
2433 if (!is_new_response(r, q))
2435 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2436 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2437 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2440 napi_schedule(&qs->napi);
2445 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2446 * (i.e., response queue serviced in hard interrupt).
2448 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2450 struct sge_qset *qs = cookie;
2451 struct adapter *adap = qs->adap;
2452 struct sge_rspq *q = &qs->rspq;
2454 spin_lock(&q->lock);
2455 if (process_responses(adap, qs, -1) == 0)
2456 q->unhandled_irqs++;
2457 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2458 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2459 spin_unlock(&q->lock);
2464 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2465 * (i.e., response queue serviced by NAPI polling).
2467 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2469 struct sge_qset *qs = cookie;
2470 struct sge_rspq *q = &qs->rspq;
2472 spin_lock(&q->lock);
2474 if (handle_responses(qs->adap, q) < 0)
2475 q->unhandled_irqs++;
2476 spin_unlock(&q->lock);
2481 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2482 * SGE response queues as well as error and other async events as they all use
2483 * the same MSI vector. We use one SGE response queue per port in this mode
2484 * and protect all response queues with queue 0's lock.
2486 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2488 int new_packets = 0;
2489 struct adapter *adap = cookie;
2490 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2492 spin_lock(&q->lock);
2494 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2495 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2496 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2500 if (adap->params.nports == 2 &&
2501 process_responses(adap, &adap->sge.qs[1], -1)) {
2502 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2504 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2505 V_NEWTIMER(q1->next_holdoff) |
2506 V_NEWINDEX(q1->cidx));
2510 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2511 q->unhandled_irqs++;
2513 spin_unlock(&q->lock);
2517 static int rspq_check_napi(struct sge_qset *qs)
2519 struct sge_rspq *q = &qs->rspq;
2521 if (!napi_is_scheduled(&qs->napi) &&
2522 is_new_response(&q->desc[q->cidx], q)) {
2523 napi_schedule(&qs->napi);
2530 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2531 * by NAPI polling). Handles data events from SGE response queues as well as
2532 * error and other async events as they all use the same MSI vector. We use
2533 * one SGE response queue per port in this mode and protect all response
2534 * queues with queue 0's lock.
2536 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2539 struct adapter *adap = cookie;
2540 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2542 spin_lock(&q->lock);
2544 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2545 if (adap->params.nports == 2)
2546 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2547 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2548 q->unhandled_irqs++;
2550 spin_unlock(&q->lock);
2555 * A helper function that processes responses and issues GTS.
2557 static inline int process_responses_gts(struct adapter *adap,
2558 struct sge_rspq *rq)
2562 work = process_responses(adap, rspq_to_qset(rq), -1);
2563 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2564 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2569 * The legacy INTx interrupt handler. This needs to handle data events from
2570 * SGE response queues as well as error and other async events as they all use
2571 * the same interrupt pin. We use one SGE response queue per port in this mode
2572 * and protect all response queues with queue 0's lock.
2574 static irqreturn_t t3_intr(int irq, void *cookie)
2576 int work_done, w0, w1;
2577 struct adapter *adap = cookie;
2578 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2579 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2581 spin_lock(&q0->lock);
2583 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2584 w1 = adap->params.nports == 2 &&
2585 is_new_response(&q1->desc[q1->cidx], q1);
2587 if (likely(w0 | w1)) {
2588 t3_write_reg(adap, A_PL_CLI, 0);
2589 t3_read_reg(adap, A_PL_CLI); /* flush */
2592 process_responses_gts(adap, q0);
2595 process_responses_gts(adap, q1);
2597 work_done = w0 | w1;
2599 work_done = t3_slow_intr_handler(adap);
2601 spin_unlock(&q0->lock);
2602 return IRQ_RETVAL(work_done != 0);
2606 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2607 * Handles data events from SGE response queues as well as error and other
2608 * async events as they all use the same interrupt pin. We use one SGE
2609 * response queue per port in this mode and protect all response queues with
2612 static irqreturn_t t3b_intr(int irq, void *cookie)
2615 struct adapter *adap = cookie;
2616 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2618 t3_write_reg(adap, A_PL_CLI, 0);
2619 map = t3_read_reg(adap, A_SG_DATA_INTR);
2621 if (unlikely(!map)) /* shared interrupt, most likely */
2624 spin_lock(&q0->lock);
2626 if (unlikely(map & F_ERRINTR))
2627 t3_slow_intr_handler(adap);
2629 if (likely(map & 1))
2630 process_responses_gts(adap, q0);
2633 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2635 spin_unlock(&q0->lock);
2640 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2641 * Handles data events from SGE response queues as well as error and other
2642 * async events as they all use the same interrupt pin. We use one SGE
2643 * response queue per port in this mode and protect all response queues with
2646 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2649 struct adapter *adap = cookie;
2650 struct sge_qset *qs0 = &adap->sge.qs[0];
2651 struct sge_rspq *q0 = &qs0->rspq;
2653 t3_write_reg(adap, A_PL_CLI, 0);
2654 map = t3_read_reg(adap, A_SG_DATA_INTR);
2656 if (unlikely(!map)) /* shared interrupt, most likely */
2659 spin_lock(&q0->lock);
2661 if (unlikely(map & F_ERRINTR))
2662 t3_slow_intr_handler(adap);
2664 if (likely(map & 1))
2665 napi_schedule(&qs0->napi);
2668 napi_schedule(&adap->sge.qs[1].napi);
2670 spin_unlock(&q0->lock);
2675 * t3_intr_handler - select the top-level interrupt handler
2676 * @adap: the adapter
2677 * @polling: whether using NAPI to service response queues
2679 * Selects the top-level interrupt handler based on the type of interrupts
2680 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2683 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2685 if (adap->flags & USING_MSIX)
2686 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2687 if (adap->flags & USING_MSI)
2688 return polling ? t3_intr_msi_napi : t3_intr_msi;
2689 if (adap->params.rev > 0)
2690 return polling ? t3b_intr_napi : t3b_intr;
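/*
 * Usage sketch (editorial, illustrative only): callers are expected to pass
 * the selected handler straight to request_irq().  The cookie and name used
 * below are placeholders for the single-interrupt case, not the driver's
 * actual setup code:
 *
 *	irq_handler_t handler = t3_intr_handler(adap, 1);	// 1 = use NAPI
 *	int err = request_irq(adap->pdev->irq, handler, IRQF_SHARED,
 *			      "cxgb3", adap);
 */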
2694 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2695 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2696 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2697 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2699 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2700 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2704 * t3_sge_err_intr_handler - SGE async event interrupt handler
2705 * @adapter: the adapter
2707 * Interrupt handler for SGE asynchronous (non-data) events.
2709 void t3_sge_err_intr_handler(struct adapter *adapter)
2711 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2713 if (status & SGE_PARERR)
2714 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2715 status & SGE_PARERR);
2716 if (status & SGE_FRAMINGERR)
2717 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2718 status & SGE_FRAMINGERR);
2720 if (status & F_RSPQCREDITOVERFOW)
2721 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2723 if (status & F_RSPQDISABLED) {
2724 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2727 "packet delivered to disabled response queue "
2728 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2731 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2732 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2733 status & F_HIPIODRBDROPERR ? "high" : "low");
2735 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2736 if (status & SGE_FATALERR)
2737 t3_fatal_err(adapter);
2741 * sge_timer_cb - perform periodic maintenance of an SGE qset
2742 * @data: the SGE queue set to maintain
2744 * Runs periodically from a timer to perform maintenance of an SGE queue
2745 * set. It performs two tasks:
2747 * a) Cleans up any completed Tx descriptors that may still be pending.
2748 * Normal descriptor cleanup happens when new packets are added to a Tx
2749 * queue so this timer is relatively infrequent and does any cleanup only
2750 * if the Tx queue has not seen any new packets in a while. We make a
2751 * best effort attempt to reclaim descriptors, in that we don't wait
2752 * around if we cannot get a queue's lock (which most likely is because
2753 * someone else is queueing new packets and so will also handle the clean
2754 * up). Since control queues use immediate data exclusively we don't
2755 * bother cleaning them up here.
2757 * b) Replenishes Rx queues that have run out due to memory shortage.
2758 * Normally new Rx buffers are added when existing ones are consumed but
2759 * when out of memory a queue can become empty. We try to add only a few
2760 * buffers here; the queue will be replenished fully as these new buffers
2761 * are used up if memory shortage has subsided.
2763 static void sge_timer_cb(unsigned long data)
2766 struct sge_qset *qs = (struct sge_qset *)data;
2767 struct adapter *adap = qs->adap;
2769 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2770 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2771 spin_unlock(&qs->txq[TXQ_ETH].lock);
2773 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2774 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2775 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2777 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2778 &adap->sge.qs[0].rspq.lock;
2779 if (spin_trylock_irq(lock)) {
2780 if (!napi_is_scheduled(&qs->napi)) {
2781 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2783 if (qs->fl[0].credits < qs->fl[0].size)
2784 __refill_fl(adap, &qs->fl[0]);
2785 if (qs->fl[1].credits < qs->fl[1].size)
2786 __refill_fl(adap, &qs->fl[1]);
2788 if (status & (1 << qs->rspq.cntxt_id)) {
2790 if (qs->rspq.credits) {
2791 refill_rspq(adap, &qs->rspq, 1);
2793 qs->rspq.restarted++;
2794 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2795 1 << qs->rspq.cntxt_id);
2799 spin_unlock_irq(lock);
2801 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2805 * t3_update_qset_coalesce - update coalescing settings for a queue set
2806 * @qs: the SGE queue set
2807 * @p: new queue set parameters
2809 * Update the coalescing settings for an SGE queue set. Nothing is done
2810 * if the queue set is not initialized yet.
2812 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2814 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2815 qs->rspq.polling = p->polling;
2816 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
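/*
 * Worked example (editorial): the holdoff timer counts in ticks of roughly
 * 0.1 us -- see the A_SG_TIMER_TICK programming in t3_sge_init() below,
 * which divides core_ticks_per_usec() by 10 -- hence the multiplication by
 * 10 above.  With the default coalesce_usecs of 5 set in t3_sge_prep():
 *
 *	holdoff_tmr = max(5 * 10, 1U) = 50 ticks, i.e. about 5 us of coalescing
 */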
2820 * t3_sge_alloc_qset - initialize an SGE queue set
2821 * @adapter: the adapter
2822 * @id: the queue set id
2823 * @nports: how many Ethernet ports will be using this queue set
2824 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2825 * @p: configuration parameters for this queue set
2826 * @ntxq: number of Tx queues for the queue set
2827 * @netdev: net device associated with this queue set
2829 * Allocate resources and initialize an SGE queue set. A queue set
2830 * comprises a response queue, two Rx free-buffer queues, and up to 3
2831 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2832 * queue, offload queue, and control queue.
2834 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2835 int irq_vec_idx, const struct qset_params *p,
2836 int ntxq, struct net_device *dev)
2838 int i, avail, ret = -ENOMEM;
2839 struct sge_qset *q = &adapter->sge.qs[id];
2840 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2842 init_qset_cntxt(q, id);
2843 init_timer(&q->tx_reclaim_timer);
2844 q->tx_reclaim_timer.data = (unsigned long)q;
2845 q->tx_reclaim_timer.function = sge_timer_cb;
2847 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2848 sizeof(struct rx_desc),
2849 sizeof(struct rx_sw_desc),
2850 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2854 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2855 sizeof(struct rx_desc),
2856 sizeof(struct rx_sw_desc),
2857 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2861 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2862 sizeof(struct rsp_desc), 0,
2863 &q->rspq.phys_addr, NULL);
2867 for (i = 0; i < ntxq; ++i) {
2869 * The control queue always uses immediate data so does not
2870 * need to keep track of any sk_buffs.
2872 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2874 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2875 sizeof(struct tx_desc), sz,
2876 &q->txq[i].phys_addr,
2878 if (!q->txq[i].desc)
2882 q->txq[i].size = p->txq_size[i];
2883 spin_lock_init(&q->txq[i].lock);
2884 skb_queue_head_init(&q->txq[i].sendq);
2887 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2889 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2892 q->fl[0].gen = q->fl[1].gen = 1;
2893 q->fl[0].size = p->fl_size;
2894 q->fl[1].size = p->jumbo_size;
2897 q->rspq.size = p->rspq_size;
2898 spin_lock_init(&q->rspq.lock);
2899 skb_queue_head_init(&q->rspq.rx_queue);
2901 q->txq[TXQ_ETH].stop_thres = nports *
2902 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2904 #if FL0_PG_CHUNK_SIZE > 0
2905 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2907 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2909 #if FL1_PG_CHUNK_SIZE > 0
2910 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2912 q->fl[1].buf_size = is_offload(adapter) ?
2913 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2914 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2917 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2918 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2919 q->fl[0].order = FL0_PG_ORDER;
2920 q->fl[1].order = FL1_PG_ORDER;
2922 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2923 sizeof(struct skb_frag_struct),
2925 q->lro_nfrags = q->lro_frag_len = 0;
2926 spin_lock_irq(&adapter->sge.reg_lock);
2928 /* FL threshold comparison uses < */
2929 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2930 q->rspq.phys_addr, q->rspq.size,
2931 q->fl[0].buf_size, 1, 0);
2935 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2936 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2937 q->fl[i].phys_addr, q->fl[i].size,
2938 q->fl[i].buf_size, p->cong_thres, 1,
2944 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2945 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2946 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2952 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2953 USE_GTS, SGE_CNTXT_OFLD, id,
2954 q->txq[TXQ_OFLD].phys_addr,
2955 q->txq[TXQ_OFLD].size, 0, 1, 0);
2961 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2963 q->txq[TXQ_CTRL].phys_addr,
2964 q->txq[TXQ_CTRL].size,
2965 q->txq[TXQ_CTRL].token, 1, 0);
2970 spin_unlock_irq(&adapter->sge.reg_lock);
2974 t3_update_qset_coalesce(q, p);
2976 init_lro_mgr(q, lro_mgr);
2978 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
2979 GFP_KERNEL | __GFP_COMP);
2981 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
2984 if (avail < q->fl[0].size)
2985 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
2988 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
2989 GFP_KERNEL | __GFP_COMP);
2990 if (avail < q->fl[1].size)
2991 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
2993 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2995 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2996 V_NEWTIMER(q->rspq.holdoff_tmr));
2998 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3002 spin_unlock_irq(&adapter->sge.reg_lock);
3004 t3_free_qset(adapter, q);
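/*
 * Usage sketch (editorial, illustrative only): a caller bringing up the
 * per-port queue sets would typically invoke this once per queue set.  The
 * index variables and the SGE_TXQ_PER_SET count below are assumptions about
 * the caller, not code from this file:
 *
 *	err = t3_sge_alloc_qset(adap, qset_idx, adap->params.nports,
 *				irq_vec_idx, &adap->params.sge.qset[qset_idx],
 *				SGE_TXQ_PER_SET, netdev);
 */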
3009 * t3_stop_sge_timers - stop SGE timer callbacks
3010 * @adap: the adapter
3012 * Stops each SGE queue set's timer callback.
3014 void t3_stop_sge_timers(struct adapter *adap)
3018 for (i = 0; i < SGE_QSETS; ++i) {
3019 struct sge_qset *q = &adap->sge.qs[i];
3021 if (q->tx_reclaim_timer.function)
3022 del_timer_sync(&q->tx_reclaim_timer);
3027 * t3_free_sge_resources - free SGE resources
3028 * @adap: the adapter
3030 * Frees resources used by the SGE queue sets.
3032 void t3_free_sge_resources(struct adapter *adap)
3036 for (i = 0; i < SGE_QSETS; ++i)
3037 t3_free_qset(adap, &adap->sge.qs[i]);
3041 * t3_sge_start - enable SGE
3042 * @adap: the adapter
3044 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3047 void t3_sge_start(struct adapter *adap)
3049 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3053 * t3_sge_stop - disable SGE operation
3054 * @adap: the adapter
3056 * Disables the DMA engine. This can be called in emergencies (e.g.,
3057 * from error interrupts) or from normal process context. In the latter
3058 * case it also disables any pending queue restart tasklets. Note that
3059 * if it is called in interrupt context it cannot disable the restart
3060 * tasklets as it cannot wait, however the tasklets will have no effect
3061 * since the doorbells are disabled and the driver will call this again
3062 * later from process context, at which time the tasklets will be stopped
3063 * if they are still running.
3065 void t3_sge_stop(struct adapter *adap)
3067 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3068 if (!in_interrupt()) {
3071 for (i = 0; i < SGE_QSETS; ++i) {
3072 struct sge_qset *qs = &adap->sge.qs[i];
3074 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3075 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3081 * t3_sge_init - initialize SGE
3082 * @adap: the adapter
3083 * @p: the SGE parameters
3085 * Performs SGE initialization needed every time after a chip reset.
3086 * We do not initialize any of the queue sets here; instead, the driver
3087 * top-level must request those individually. We also do not enable DMA
3088 * here, that should be done after the queues have been set up.
3090 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3092 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3094 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3095 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3096 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3097 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3098 #if SGE_NUM_GENBITS == 1
3099 ctrl |= F_EGRGENCTRL;
3101 if (adap->params.rev > 0) {
3102 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3103 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3105 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3106 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3107 V_LORCQDRBTHRSH(512));
3108 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3109 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3110 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3111 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3112 adap->params.rev < T3_REV_C ? 1000 : 500);
3113 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3114 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3115 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3116 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3117 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3121 * t3_sge_prep - one-time SGE initialization
3122 * @adap: the associated adapter
3123 * @p: SGE parameters
3125 * Performs one-time initialization of SGE SW state. Includes determining
3126 * defaults for the assorted SGE parameters, which admins can change until
3127 * they are used to initialize the SGE.
3129 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3133 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3134 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3136 for (i = 0; i < SGE_QSETS; ++i) {
3137 struct qset_params *q = p->qset + i;
3139 q->polling = adap->params.rev > 0;
3140 q->coalesce_usecs = 5;
3141 q->rspq_size = 1024;
3143 q->jumbo_size = 512;
3144 q->txq_size[TXQ_ETH] = 1024;
3145 q->txq_size[TXQ_OFLD] = 1024;
3146 q->txq_size[TXQ_CTRL] = 256;
3150 spin_lock_init(&adap->sge.reg_lock);
3154 * t3_get_desc - dump an SGE descriptor for debugging purposes
3155 * @qs: the queue set
3156 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3157 * @idx: the descriptor index in the queue
3158 * @data: where to dump the descriptor contents
3160 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3161 * size of the descriptor.
3163 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3164 unsigned char *data)
3170 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3172 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3173 return sizeof(struct tx_desc);
3177 if (!qs->rspq.desc || idx >= qs->rspq.size)
3179 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3180 return sizeof(struct rsp_desc);
3184 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3186 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3187 return sizeof(struct rx_desc);
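/*
 * Usage sketch (editorial, illustrative only): a debug caller sizes its
 * buffer for the largest descriptor type and keys off the returned length.
 * Assuming struct tx_desc is the largest of the three descriptor formats:
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(qs, 3, idx, buf);		// qnum 3 = response queue
 *	if (len > 0)
 *		print_hex_dump(KERN_DEBUG, "desc: ", DUMP_PREFIX_OFFSET,
 *			       16, 1, buf, len, 0);
 */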