/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "firmware_exports.h"

#define SGE_RX_SM_BUF_SIZE 1536
#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN 128
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE 2048
#define FL0_PG_ORDER 0
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
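/*
 * For illustration (derived directly from the defines above): on a
 * system with 4 KB pages, FL1 hands out 8 KB chunks carved from order-1
 * (two-page) allocations, while on a system with 64 KB pages it hands
 * out 16 KB chunks from single order-0 pages.
 */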
#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer. This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)
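/*
 * A "flit" throughout this file is a 64-bit (8-byte) datum, the unit in
 * which the SGE consumes descriptors; a work request of WR_FLITS flits
 * therefore occupies WR_FLITS * 8 bytes.
 */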
/*
 * Types of Tx queues in each queue set. Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
TXQ_RUNNING = 1 << 0, /* fetch engine is running */
TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */

__be64 flit[TX_DESC_FLITS];

struct tx_sw_desc { /* SW state per Tx descriptor */
u8 eop; /* set if last descriptor for packet */
u8 addr_idx; /* buffer index of first SGL entry in descriptor */
u8 fragidx; /* first page fragment associated with descriptor */
s8 sflit; /* start flit of first SGL entry in descriptor */

struct rx_sw_desc { /* SW state per Rx descriptor */
struct fl_pg_chunk pg_chunk;
DECLARE_PCI_UNMAP_ADDR(dma_addr);

struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
struct pci_dev *pdev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *   desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
#if SGE_NUM_GENBITS == 1
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
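/*
 * Worked example (illustrative, assuming WR_FLITS == 15 as the
 * SGE_NUM_GENBITS == 2 configuration implies): a 30-flit work request
 * needs 1 + (30 - 2) / (15 - 1) == 3 descriptors, which is exactly the
 * value the table above yields at index 30.
 */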
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
        return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
        return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
        return container_of(q, struct sge_qset, txq[qidx]);
}
/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
const struct sge_rspq *q, unsigned int credits)
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping. The compiler
 * optimizes away the unmapping code when this returns a compile-time
 * false.
 */
static inline int need_skb_unmap(void)
{
        /*
         * This structure is used to tell if the platform needs buffer
         * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
         */
        struct dummy {
                DECLARE_PCI_UNMAP_ADDR(addr);
        };

        return sizeof(struct dummy) != 0;
}
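/*
 * How the trick works (illustrative): on platforms where
 * DECLARE_PCI_UNMAP_ADDR() expands to nothing, struct dummy is empty,
 * sizeof(struct dummy) is 0 (a GCC extension), and the function folds
 * to a compile-time 0, letting the compiler discard every unmap path
 * guarded by it.
 */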
/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices). The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem. We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
unsigned int cidx, struct pci_dev *pdev)
const struct sg_ent *sgp;
struct tx_sw_desc *d = &q->sdesc[cidx];
int nfrags, frag_idx, curflit, j = d->addr_idx;

sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
frag_idx = d->fragidx;

if (frag_idx == 0 && skb_headlen(skb)) {
pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
skb_headlen(skb), PCI_DMA_TODEVICE);

curflit = d->sflit + 1 + j;
nfrags = skb_shinfo(skb)->nr_frags;

while (frag_idx < nfrags && curflit < WR_FLITS) {
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
skb_shinfo(skb)->frags[frag_idx].size,

if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
d = cidx + 1 == q->size ? q->sdesc : d + 1;
d->fragidx = frag_idx;
d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers. Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
struct tx_sw_desc *d;
struct pci_dev *pdev = adapter->pdev;
unsigned int cidx = q->cidx;

const int need_unmap = need_skb_unmap() &&
q->cntxt_id >= FW_TUNNEL_SGEEC_START;

if (d->skb) { /* an SGL is present */
unmap_skb(d->skb, q, cidx, pdev);

if (++cidx == q->size) {
/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible. Called with the Tx
 * queue lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
unsigned int reclaim = q->processed - q->cleaned;

free_tx_desc(adapter, q, reclaim);
q->cleaned += reclaim;
q->in_use -= reclaim;
/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
}
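/*
 * In other words (illustrative): in_use - r is the number of
 * descriptors still genuinely outstanding once the already-processed
 * ones are discounted; the queue is only worth restarting when that
 * count drops below half the ring size.
 */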
/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @rxq: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
unsigned int cidx = q->cidx;

while (q->credits--) {
struct rx_sw_desc *d = &q->sdesc[cidx];

pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);

if (d->pg_chunk.page)
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;

if (++cidx == q->size)

if (q->pg_chunk.page) {
__free_pages(q->pg_chunk.page, q->order);
q->pg_chunk.page = NULL;
/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
unsigned int gen, struct pci_dev *pdev)
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(pdev, mapping)))

pci_unmap_addr_set(sd, dma_addr, mapping);

d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
if (!q->pg_chunk.page) {
q->pg_chunk.page = alloc_pages(gfp, order);
if (unlikely(!q->pg_chunk.page))
q->pg_chunk.va = page_address(q->pg_chunk.page);
q->pg_chunk.offset = 0;

sd->pg_chunk = q->pg_chunk;

q->pg_chunk.offset += q->buf_size;
if (q->pg_chunk.offset == (PAGE_SIZE << order))
q->pg_chunk.page = NULL;

q->pg_chunk.va += q->buf_size;
get_page(q->pg_chunk.page);
/**
 * refill_fl - refill an SGE free-buffer list
 * @adapter: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags. The caller must ensure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
unsigned int count = 0;

if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
nomem: q->alloc_failed++;
buf_start = sd->pg_chunk.va;

struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
buf_start = skb->data;

err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,

if (++q->pidx == q->size) {

t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
refill_fl(adap, fl, min(16U, fl->size - fl->credits),
GFP_ATOMIC | __GFP_COMP);
/**
 * recycle_rx_buf - recycle a receive buffer
 * @adapter: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
struct rx_desc *from = &q->desc[idx];
struct rx_desc *to = &q->desc[q->pidx];

q->sdesc[q->pidx] = q->sdesc[idx];
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */

to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

if (++q->pidx == q->size) {

t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues. Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata). The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
size_t sw_size, dma_addr_t *phys, void *metadata)
size_t len = nelem * elem_size;

void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

if (sw_size && metadata) {
s = kcalloc(nelem, sw_size, GFP_KERNEL);
dma_free_coherent(&pdev->dev, len, p, *phys);
*(void **)metadata = s;
/**
 * t3_reset_qset - reset an SGE qset
 *
 * Reset the qset structure. The NAPI structure is preserved in the
 * event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
!(q->adap->flags & NAPI_INIT)) {
memset(q, 0, sizeof(*q));

memset(&q->rspq, 0, sizeof(q->rspq));
memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);

q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
struct pci_dev *pdev = adapter->pdev;

for (i = 0; i < SGE_RXQ_PER_SET; ++i)
spin_lock_irq(&adapter->sge.reg_lock);
t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
spin_unlock_irq(&adapter->sge.reg_lock);
free_rx_bufs(pdev, &q->fl[i]);
kfree(q->fl[i].sdesc);
dma_free_coherent(&pdev->dev,
sizeof(struct rx_desc), q->fl[i].desc,

for (i = 0; i < SGE_TXQ_PER_SET; ++i)
if (q->txq[i].desc) {
spin_lock_irq(&adapter->sge.reg_lock);
t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
spin_unlock_irq(&adapter->sge.reg_lock);
if (q->txq[i].sdesc) {
free_tx_desc(adapter, &q->txq[i],
kfree(q->txq[i].sdesc);
dma_free_coherent(&pdev->dev,
sizeof(struct tx_desc),
q->txq[i].desc, q->txq[i].phys_addr);
__skb_queue_purge(&q->txq[i].sendq);

spin_lock_irq(&adapter->sge.reg_lock);
t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
spin_unlock_irq(&adapter->sge.reg_lock);
dma_free_coherent(&pdev->dev,
q->rspq.size * sizeof(struct rsp_desc),
q->rspq.desc, q->rspq.phys_addr);
/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
qs->rspq.cntxt_id = id;
qs->fl[0].cntxt_id = 2 * id;
qs->fl[1].cntxt_id = 2 * id + 1;
qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
        return (3 * n) / 2 + (n & 1);
}
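/*
 * Worked example (illustrative): each struct sg_ent packs two
 * address/length pairs into 3 flits, so sgl_len(4) == 6 while
 * sgl_len(3) == 5, the odd final entry still costing a full 2 flits
 * for its address and length.
 */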
/**
 * flits_to_desc - returns the number of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
        return flit_desc_map[n];
}
/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff. If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself. If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
unsigned int len, unsigned int drop_thres)
struct sk_buff *skb = NULL;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

prefetch(sd->skb->data);

if (len <= SGE_RX_COPY_THRES) {
skb = alloc_skb(len, GFP_ATOMIC);
if (likely(skb != NULL)) {
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
} else if (!drop_thres)

recycle_rx_buf(adap, fl, fl->cidx);

if (unlikely(fl->credits < drop_thres))

pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);

__refill_fl(adap, fl);
/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff. If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no memory for the copy.
 *
 * Note: this function is similar to @get_packet but deals with Rx buffers
 * that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sge_rspq *q, unsigned int len,
unsigned int drop_thres)
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

newskb = skb = q->pg_skb;

if (!skb && (len <= SGE_RX_COPY_THRES)) {
newskb = alloc_skb(len, GFP_ATOMIC);
if (likely(newskb != NULL)) {
__skb_put(newskb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
memcpy(newskb->data, sd->pg_chunk.va, len);
pci_dma_sync_single_for_device(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
} else if (!drop_thres)

recycle_rx_buf(adap, fl, fl->cidx);

if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))

newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
if (unlikely(!newskb)) {

pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);

__skb_put(newskb, SGE_RX_PULL_LEN);
memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
sd->pg_chunk.offset + SGE_RX_PULL_LEN,
len - SGE_RX_PULL_LEN);
newskb->data_len = len - SGE_RX_PULL_LEN;

skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
sd->pg_chunk.offset, len);
newskb->data_len += len;

newskb->truesize += newskb->data_len;
/*
 * We do not refill FLs here, we let the caller do it to overlap a
 * prefetch.
 */
/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

__skb_put(skb, IMMED_PKT_SIZE);
skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet. Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))

flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
if (skb_shinfo(skb)->gso_size)
return flits_to_desc(flits);
/**
 * make_sgl - populate a scatter/gather list for a packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words. The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
struct sg_ent *sgp, unsigned char *start,
unsigned int len, struct pci_dev *pdev)
unsigned int i, j = 0, nfrags;

mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
sgp->addr[0] = cpu_to_be64(mapping);

nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

mapping = pci_map_page(pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
sgp->len[j] = cpu_to_be32(frag->size);
sgp->addr[j] = cpu_to_be64(mapping);

return ((nfrags + (len != 0)) * 3) / 2 + j;
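/*
 * Note (illustrative): in the full loop j toggles between the two
 * address/length slots of each sg_ent, so on exit it equals the parity
 * of the entry count and the expression above works out to
 * sgl_len(nfrags + (len != 0)).
 */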
/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *
 * Ring the doorbell if a Tx queue is asleep. There is a natural race
 * where the HW goes to sleep just after we check; in that case the
 * interrupt handler will detect the outstanding TX packet and ring the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
clear_bit(TXQ_LAST_PKT_DB, &q->flags);
if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
set_bit(TXQ_LAST_PKT_DB, &q->flags);
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));

wmb(); /* write descriptors before telling HW */
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
        d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL. If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header. Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
struct tx_desc *d, unsigned int pidx,
const struct sge_txq *q,
const struct sg_ent *sgl,
unsigned int flits, unsigned int sgl_flits,
unsigned int gen, __be32 wr_hi,
struct work_request_hdr *wrp = (struct work_request_hdr *)d;
struct tx_sw_desc *sd = &q->sdesc[pidx];

if (need_skb_unmap()) {

if (likely(ndesc == 1)) {
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
V_WR_GEN(gen)) | wr_lo;

unsigned int ogen = gen;
const u64 *fp = (const u64 *)sgl;
struct work_request_hdr *wp = wrp;

wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;

unsigned int avail = WR_FLITS - flits;

if (avail > sgl_flits)
memcpy(&d->flit[flits], fp, avail * sizeof(*fp));

if (++pidx == q->size) {

wrp = (struct work_request_hdr *)d;
wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
V_WR_SGLSFLT(1)) | wr_hi;
wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
V_WR_GEN(gen)) | wr_lo;

wrp->wr_hi |= htonl(F_WR_EOP);

wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
wr_gen2((struct tx_desc *)wp, ogen);
WARN_ON(ndesc != 0);
/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
struct tx_desc *d = &q->desc[pidx];
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

cpl->len = htonl(skb->len | 0x80000000);
cntrl = V_TXPKT_INTF(pi->port_id);

if (vlan_tx_tag_present(skb) && pi->vlan_grp)
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
hdr->cntrl = htonl(cntrl);
eth_type = skb_network_offset(skb) == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
tso_info |= V_LSO_ETH_TYPE(eth_type) |
V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
hdr->lso_info = htonl(tso_info);

cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
cpl->cntrl = htonl(cntrl);

if (skb->len <= WR_LEN - sizeof(*cpl)) {
q->sdesc[pidx].skb = NULL;
skb_copy_from_linear_data(skb, &d->flit[2],
skb_copy_bits(skb, 0, &d->flit[2], skb->len);

flits = (skb->len + 7) / 8 + 2;
cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
| F_WR_SOP | F_WR_EOP | compl);
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));

sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
htonl(V_WR_TID(q->token)));
static inline void t3_stop_tx_queue(struct netdev_queue *txq,
struct sge_qset *qs, struct sge_txq *q)
netif_tx_stop_queue(txq);
set_bit(TXQ_ETH, &qs->txq_stopped);
/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int ndesc, pidx, credits, gen, compl;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
struct netdev_queue *txq;
struct sge_qset *qs;

/*
 * The chip min packet length is 9 octets but play safe and reject
 * anything shorter than an Ethernet header.
 */
if (unlikely(skb->len < ETH_HLEN)) {
return NETDEV_TX_OK;

qidx = skb_get_queue_mapping(skb);
q = &qs->txq[TXQ_ETH];
txq = netdev_get_tx_queue(dev, qidx);

spin_lock(&q->lock);
reclaim_completed_tx(adap, q);

credits = q->size - q->in_use;
ndesc = calc_tx_descs(skb);

if (unlikely(credits < ndesc)) {
t3_stop_tx_queue(txq, qs, q);
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
spin_unlock(&q->lock);
return NETDEV_TX_BUSY;

if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_tx_queue(txq, qs, q);

if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
netif_tx_wake_queue(txq);

q->unacked += ndesc;
compl = (q->unacked & 8) << (S_WR_COMPL - 3);

if (q->pidx >= q->size) {

/* update port statistics */
if (skb->ip_summed == CHECKSUM_COMPLETE)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;

dev->trans_start = jiffies;
spin_unlock(&q->lock);
/*
 * We do not use Tx completion interrupts to free DMAd Tx packets.
 * This is good for performance but means that we rely on new Tx
 * packets arriving to run the destructors of completed packets,
 * which open up space in their sockets' send queues. Sometimes
 * we do not get such new packets causing Tx to stall. A single
 * UDP transmitter is a good example of this situation. We have
 * a clean up timer that periodically reclaims completed packets
 * but it doesn't run often enough (nor do we want it to) to prevent
 * lengthy stalls. A solution to this problem is to run the
 * destructor early, after the packet is queued but before it's DMAd.
 * A con is that we lie to socket memory accounting, but the amount
 * of extra memory is reasonable (limited by the number of Tx
 * descriptors), the packets do actually get freed quickly by new
 * packets almost always, and for protocols like TCP that wait for
 * acks to really free up the data the extra memory is even less.
 * On the positive side we run the destructors on the sending CPU
 * rather than on a potentially different completing CPU, usually a
 * good thing. We also run them without holding our Tx queue lock,
 * unlike what reclaim_completed_tx() would otherwise do.
 *
 * Run the destructor before telling the DMA engine about the packet
 * to make sure it doesn't complete and get freed prematurely.
 */
if (likely(!skb_shared(skb)))
        skb_orphan(skb);

write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor. The packet
 * contains a work request at its beginning. We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
unsigned int len, unsigned int gen)
struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
struct work_request_hdr *to = (struct work_request_hdr *)d;

if (likely(!skb->data_len))
memcpy(&to[1], &from[1], len - sizeof(*from));
else
skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
V_WR_BCNTLFLT(len & 7));

to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
V_WR_LEN((len + 7) / 8));
/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue. If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb, unsigned int ndesc,
if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:__skb_queue_tail(&q->sendq, skb);
if (unlikely(q->size - q->in_use < ndesc)) {
struct sge_qset *qs = txq_to_qset(q, qid);

set_bit(qid, &qs->txq_stopped);
smp_mb__after_clear_bit();

if (should_restart_tx(q) &&
test_and_clear_bit(qid, &qs->txq_stopped))
/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        q->in_use -= reclaim;
        q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
        return skb->len <= WR_LEN;
}
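/*
 * Illustrative sizing: with WR_FLITS == 15 (see WR_LEN above) anything
 * up to 120 bytes, work request header included, qualifies as immediate
 * data and is copied straight into the descriptor.
 */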
/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 *
 * Send a packet through an SGE control Tx queue. Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb)
struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

if (unlikely(!immediate(skb))) {
return NET_XMIT_SUCCESS;

wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
wrp->wr_lo = htonl(V_WR_TID(q->token));

spin_lock(&q->lock);
again:reclaim_completed_tx_imm(q);

ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
if (unlikely(ret)) {
spin_unlock(&q->lock);

write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

if (++q->pidx >= q->size) {

spin_unlock(&q->lock);

t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
return NET_XMIT_SUCCESS;
/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_CTRL];

spin_lock(&q->lock);
again:reclaim_completed_tx_imm(q);

while (q->in_use < q->size &&
(skb = __skb_dequeue(&q->sendq)) != NULL) {

write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

if (++q->pidx >= q->size) {

if (!skb_queue_empty(&q->sendq)) {
set_bit(TXQ_CTRL, &qs->txq_stopped);
smp_mb__after_clear_bit();

if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))

spin_unlock(&q->lock);

t3_write_reg(qs->adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
const dma_addr_t *p;
const struct skb_shared_info *si;
const struct deferred_unmap_info *dui;

dui = (struct deferred_unmap_info *)skb->head;

if (skb->tail - skb->transport_header)
pci_unmap_single(dui->pdev, *p++,
skb->tail - skb->transport_header,

si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
const struct sg_ent *sgl, int sgl_flits)
struct deferred_unmap_info *dui;

dui = (struct deferred_unmap_info *)skb->head;
for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
*p++ = be64_to_cpu(sgl->addr[0]);
*p++ = be64_to_cpu(sgl->addr[1]);

*p = be64_to_cpu(sgl->addr[0]);
/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet. The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
unsigned int gen, unsigned int ndesc)
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
struct tx_desc *d = &q->desc[pidx];

if (immediate(skb)) {
q->sdesc[pidx].skb = NULL;
write_imm(d, skb, skb->len, gen);

/* Only TX_DATA builds SGLs */

from = (struct work_request_hdr *)skb->data;
memcpy(&d->flit[1], &from[1],
skb_transport_offset(skb) - sizeof(*from));

flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
skb->tail - skb->transport_header,
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;

write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
/**
 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 *
 * Returns the number of Tx descriptors needed for the given offload
 * packet. These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
unsigned int flits, cnt;

if (skb->len <= WR_LEN)
return 1; /* packet fits as immediate data */

flits = skb_transport_offset(skb) / 8; /* headers */
cnt = skb_shinfo(skb)->nr_frags;
if (skb->tail != skb->transport_header)
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
/**
 * ofld_xmit - send a packet through an offload queue
 * @adap: the adapter
 * @q: the Tx offload queue
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
struct sk_buff *skb)
unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

spin_lock(&q->lock);
again:reclaim_completed_tx(adap, q);

ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
if (unlikely(ret)) {
skb->priority = ndesc; /* save for restart */
spin_unlock(&q->lock);

if (q->pidx >= q->size) {

spin_unlock(&q->lock);

write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
/**
 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;

spin_lock(&q->lock);
again:reclaim_completed_tx(adap, q);

while ((skb = skb_peek(&q->sendq)) != NULL) {
unsigned int gen, pidx;
unsigned int ndesc = skb->priority;

if (unlikely(q->size - q->in_use < ndesc)) {
set_bit(TXQ_OFLD, &qs->txq_stopped);
smp_mb__after_clear_bit();

if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))

if (q->pidx >= q->size) {

__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);

write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
spin_lock(&q->lock);

spin_unlock(&q->lock);

set_bit(TXQ_RUNNING, &q->flags);
set_bit(TXQ_LAST_PKT_DB, &q->flags);

t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
/**
 * queue_set - return the queue set a packet should use
 *
 * Maps a packet to the SGE queue set it should use. The desired queue
 * set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
        return skb->priority >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 *
 * Determines whether an offload packet should use an OFLD or a CTRL
 * Tx queue. This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
        return skb->priority & 1;
}
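/*
 * Taken together (illustrative): offload senders encode the target in
 * skb->priority as roughly (queue_set << 1) | is_control, so e.g. a
 * priority of 5 selects the control queue of queue set 2.
 */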
/**
 * t3_offload_tx - send an offload packet
 * @tdev: the offload device to send to
 *
 * Sends an offload packet. We use the packet priority to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        struct adapter *adap = tdev2adap(tdev);
        struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

        if (unlikely(is_ctrl_pkt(skb)))
                return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

        return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}

/**
 * offload_enqueue - add an offload packet to an SGE offload receive queue
 * @q: the SGE response queue
 *
 * Add a new offload packet to an SGE response queue's offload packet
 * queue. If the packet is the first on the queue it schedules the RX
 * softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
int was_empty = skb_queue_empty(&q->rx_queue);

__skb_queue_tail(&q->rx_queue, skb);
struct sge_qset *qs = rspq_to_qset(q);

napi_schedule(&qs->napi);

/**
 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 * @tdev: the offload device that will be receiving the packets
 * @q: the SGE response queue that assembled the bundle
 * @skbs: the partial bundle
 * @n: the number of packets in the bundle
 *
 * Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
struct sk_buff *skbs[], int n)
q->offload_bundles++;
tdev->recv(tdev, skbs, n);
/**
 * ofld_poll - NAPI handler for offload packets in interrupt mode
 * @dev: the network device doing the polling
 * @budget: polling budget
 *
 * The NAPI handler for offload packets when a response queue is serviced
 * by the hard interrupt handler, i.e., when it's operating in non-polling
 * mode. Creates small packet batches and sends them through the offload
 * receive handler. Batches need to be of modest size as we do prefetches
 * on the packets in each.
 */
static int ofld_poll(struct napi_struct *napi, int budget)
struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
struct sge_rspq *q = &qs->rspq;
struct adapter *adapter = qs->adap;

while (work_done < budget) {
struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
struct sk_buff_head queue;

spin_lock_irq(&q->lock);
__skb_queue_head_init(&queue);
skb_queue_splice_init(&q->rx_queue, &queue);
if (skb_queue_empty(&queue)) {
napi_complete(napi);
spin_unlock_irq(&q->lock);
spin_unlock_irq(&q->lock);

skb_queue_walk_safe(&queue, skb, tmp) {
if (work_done >= budget)

__skb_unlink(skb, &queue);
prefetch(skb->data);
skbs[ngathered] = skb;
if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
adapter->tdev.recv(&adapter->tdev, skbs,

if (!skb_queue_empty(&queue)) {
/* splice remaining packets back onto Rx queue */
spin_lock_irq(&q->lock);
skb_queue_splice(&queue, &q->rx_queue);
spin_unlock_irq(&q->lock);

deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
/**
 * rx_offload - process a received offload packet
 * @tdev: the offload device receiving the packet
 * @rq: the response queue that received the packet
 * @rx_gather: a gather list of packets if we are building a bundle
 * @gather_idx: index of the next available slot in the bundle
 *
 * Process an ingress offload packet and add it to the offload ingress
 * queue. Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
struct sk_buff *skb, struct sk_buff *rx_gather[],
unsigned int gather_idx)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);

rx_gather[gather_idx++] = skb;
if (gather_idx == RX_BUNDLE_SIZE) {
tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
rq->offload_bundles++;

offload_enqueue(rq, skb);
/**
 * restart_tx - check whether to restart suspended Tx queues
 * @qs: the queue set to resume
 *
 * Restarts suspended Tx queues of an SGE queue set if they have enough
 * free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_ETH]) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
qs->txq[TXQ_ETH].restarts++;
if (netif_running(qs->netdev))
netif_tx_wake_queue(qs->tx_q);

if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_OFLD]) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
qs->txq[TXQ_OFLD].restarts++;
tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);

if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_CTRL]) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs->txq[TXQ_CTRL].restarts++;
tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
/**
 * cxgb3_arp_process - process an ARP request probing a private IP address
 * @adapter: the adapter
 * @skb: the skbuff containing the ARP request
 *
 * Check if the ARP request is probing the private IP address
 * dedicated to iSCSI, generate an ARP reply if so.
 */
static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
struct net_device *dev = skb->dev;
struct port_info *pi;
unsigned char *arp_ptr;

skb_reset_network_header(skb);

if (arp->ar_op != htons(ARPOP_REQUEST))

arp_ptr = (unsigned char *)(arp + 1);
arp_ptr += dev->addr_len;
memcpy(&sip, arp_ptr, sizeof(sip));
arp_ptr += sizeof(sip);
arp_ptr += dev->addr_len;
memcpy(&tip, arp_ptr, sizeof(tip));

pi = netdev_priv(dev);
if (tip != pi->iscsi_ipv4addr)

arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
dev->dev_addr, sha);

static inline int is_arp(struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_ARP);
}
/**
 * rx_eth - process an ingress Ethernet packet
 * @adap: the adapter
 * @rq: the response queue that received the packet
 * @pad: amount of padding at the start of the buffer
 *
 * Process an ingress Ethernet packet and deliver it to the stack.
 * The padding is 2 if the packet was delivered in an Rx buffer and 0
 * if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
struct sk_buff *skb, int pad, int lro)
struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
struct sge_qset *qs = rspq_to_qset(rq);
struct port_info *pi;

skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->ip_summed = CHECKSUM_NONE;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);

if (unlikely(p->vlan_valid)) {
struct vlan_group *grp = pi->vlan_grp;

qs->port_stats[SGE_PSTAT_VLANEX]++;
vlan_gro_receive(&qs->napi, grp,
ntohs(p->vlan), skb);
if (unlikely(pi->iscsi_ipv4addr &&
unsigned short vtag = ntohs(p->vlan) &
skb->dev = vlan_group_get_device(grp,
cxgb3_arp_process(adap, skb);
__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
dev_kfree_skb_any(skb);
} else if (rq->polling) {
napi_gro_receive(&qs->napi, skb);
if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
cxgb3_arp_process(adap, skb);
netif_receive_skb(skb);
static inline int is_eth_tcp(u32 rss)
{
        return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
}
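/*
 * Used below to gate LRO (illustrative): a 4-tuple RSS hash means the
 * chip classified the frame as TCP over IP, the only traffic LRO can
 * safely aggregate.
 */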
/**
 * lro_add_page - add a page chunk to an LRO session
 * @adap: the adapter
 * @qs: the associated queue set
 * @fl: the free list containing the page chunk to add
 * @len: packet length
 * @complete: Indicates the last fragment of a frame
 *
 * Add a received packet contained in a page chunk to an existing LRO
 * session.
 */
static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
int nr_frags = qs->lro_frag_tbl.nr_frags;
int frag_len = qs->lro_frag_tbl.len;

offset = 2 + sizeof(struct cpl_rx_pkt);
qs->lro_va = cpl = sd->pg_chunk.va + 2;

pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);

rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;

qs->lro_frag_tbl.nr_frags++;
qs->lro_frag_tbl.len = frag_len;

qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;

if (unlikely(cpl->vlan_valid)) {
struct net_device *dev = qs->netdev;
struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;

if (likely(grp != NULL)) {
vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),

napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);

qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
/**
 * handle_rsp_cntrl_info - handles control information in a response
 * @qs: the queue set corresponding to the response
 * @flags: the response control flags
 *
 * Handles the control information of an SGE response, such as GTS
 * indications and completion credits for the queue set's Tx queues.
 * HW coalesces credits, we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
unsigned int credits;

if (flags & F_RSPD_TXQ0_GTS)
clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);

credits = G_RSPD_TXQ0_CR(flags);
qs->txq[TXQ_ETH].processed += credits;

credits = G_RSPD_TXQ2_CR(flags);
qs->txq[TXQ_CTRL].processed += credits;

if (flags & F_RSPD_TXQ1_GTS)
clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);

credits = G_RSPD_TXQ1_CR(flags);
qs->txq[TXQ_OFLD].processed += credits;
/**
 * check_ring_db - check if we need to ring any doorbells
 * @adapter: the adapter
 * @qs: the queue set whose Tx queues are to be examined
 * @sleeping: indicates which Tx queue sent GTS
 *
 * Checks if some of a queue set's Tx queues need to ring their doorbells
 * to resume transmission after idling while they still have unprocessed
 * descriptors.
 */
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
unsigned int sleeping)
if (sleeping & F_RSPD_TXQ0_GTS) {
struct sge_txq *txq = &qs->txq[TXQ_ETH];

if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));

if (sleeping & F_RSPD_TXQ1_GTS) {
struct sge_txq *txq = &qs->txq[TXQ_OFLD];

if (txq->cleaned + txq->in_use != txq->processed &&
!test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
set_bit(TXQ_RUNNING, &txq->flags);
t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
V_EGRCNTX(txq->cntxt_id));
/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline int is_new_response(const struct rsp_desc *r,
const struct sge_rspq *q)
{
        return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}

static inline void clear_rspq_bufstate(struct sge_rspq * const q)
q->rx_recycle_buf = 0;
#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/*
 * How long to delay the next interrupt in case of memory shortage,
 * in units of 0.1 us (2500 == 250 us).
 */
#define NOMEM_INTR_DELAY 2500
/**
 * process_responses - process responses from an SGE response queue
 * @adap: the adapter
 * @qs: the queue set to which the response queue belongs
 * @budget: how many responses can be processed in this round
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as credits and other events
 * for the queues that belong to the response queue's queue set.
 * A negative budget is effectively unlimited.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue. If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct adapter *adap, struct sge_qset *qs,
struct sge_rspq *q = &qs->rspq;
struct rsp_desc *r = &q->desc[q->cidx];
int budget_left = budget;
unsigned int sleeping = 0;
struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];

q->next_holdoff = q->holdoff_tmr;

while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
__be32 rss_hi = *(const __be32 *)r,
rss_lo = r->rss_hdr.rss_hash_val;

eth = r->rss_hdr.opcode == CPL_RX_PKT;

if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
skb->data[0] = CPL_ASYNC_NOTIF;
rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
} else if (flags & F_RSPD_IMM_DATA_VALID) {
skb = get_imm_packet(r);
if (unlikely(!skb)) {
q->next_holdoff = NOMEM_INTR_DELAY;
/* consume one credit since we tried */
} else if ((len = ntohl(r->len_cq)) != 0) {

lro = qs->lro_enabled && is_eth_tcp(rss_hi);

fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;

#if L1_CACHE_BYTES < 128
prefetch(addr + L1_CACHE_BYTES);
#endif
__refill_fl(adap, fl);

lro_add_page(adap, qs, fl,
flags & F_RSPD_EOP);

skb = get_packet_pg(adap, fl, q,
SGE_RX_DROP_THRES : 0);

skb = get_packet(adap, fl, G_RSPD_LEN(len),
eth ? SGE_RX_DROP_THRES : 0);
if (unlikely(!skb)) {
} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))

if (++fl->cidx == fl->size)

if (flags & RSPD_CTRL_MASK) {
sleeping |= flags & RSPD_GTS_MASK;
handle_rsp_cntrl_info(qs, flags);

if (unlikely(++q->cidx == q->size)) {

if (++q->credits >= (q->size / 4)) {
refill_rspq(adap, q, q->credits);

packet_complete = flags &
(F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
F_RSPD_ASYNC_NOTIF);

if (skb != NULL && packet_complete) {
rx_eth(adap, q, skb, ethpad, lro);

/* Preserve the RSS info in csum & priority */
skb->priority = rss_lo;
ngathered = rx_offload(&adap->tdev, q, skb,

if (flags & F_RSPD_EOP)
clear_rspq_bufstate(q);

deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);

check_ring_db(adap, qs, sleeping);

smp_mb(); /* commit Tx queue .processed updates */
if (unlikely(qs->txq_stopped != 0))

budget -= budget_left;
static inline int is_pure_response(const struct rsp_desc *r)
{
	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);

	return (n | r->len_cq) == 0;
}
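/*
 * In other words, a response is "pure" when it carries no async notification,
 * no immediate data, and no free-list payload (len_cq == 0) -- it exists only
 * to return credits and GTS indications.  Both fields are tested in network
 * byte order, which is safe because the comparison is only against zero.
 */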
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct adapter *adap = qs->adap;
	int work_done = process_responses(adap, qs, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);

		/*
		 * Because we don't atomically flush the following
		 * write it is possible that in very rare cases it can
		 * reach the device in a way that races with a new
		 * response being written plus an error interrupt
		 * causing the NAPI interrupt handler below to return
		 * unhandled status to the OS.  To protect against
		 * this would require flushing the write and doing
		 * both the write and the flush with interrupts off.
		 * Way too expensive and unjustifiable given the
		 * rarity of the race.
		 *
		 * The race cannot happen at all with MSI-X.
		 */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
			     V_NEWTIMER(qs->rspq.next_holdoff) |
			     V_NEWINDEX(qs->rspq.cidx));
	}
	return work_done;
}
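/*
 * Note the standard NAPI contract at work above: only when fewer responses
 * than the budget were consumed may the handler call napi_complete() and
 * re-enable the queue's interrupt (the GTS write).  If the budget was
 * exhausted, the handler returns with the interrupt still masked and the
 * kernel polls the queue again.
 */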
/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct napi_struct *napi)
{
	return test_bit(NAPI_STATE_SCHED, &napi->state);
}
/**
 *	process_pure_responses - process pure responses from a response queue
 *	@adap: the adapter
 *	@qs: the queue set owning the response queue
 *	@r: the first pure response to process
 *
 *	A simpler version of process_responses() that handles only pure (i.e.,
 *	non data-carrying) responses.  Such responses are too light-weight to
 *	justify calling a softirq under NAPI, so we handle them specially in
 *	the interrupt handler.  The function is called with a pointer to a
 *	response, which the caller must ensure is a valid pure response.
 *
 *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
	struct sge_rspq *q = &qs->rspq;
	unsigned int sleeping = 0;

	do {
		u32 flags = ntohl(r->flags);

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		q->pure_rsps++;
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
	} while (is_new_response(r, q) && is_pure_response(r));

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	return is_new_response(r, q);
}
/**
 *	handle_responses - decide what to do with new responses in NAPI mode
 *	@adap: the adapter
 *	@q: the response queue
 *
 *	This is used by the NAPI interrupt handlers to decide what to do with
 *	new SGE responses.  If there are no new responses it returns -1.  If
 *	there are new responses and they are pure (i.e., non-data carrying)
 *	it handles them straight in hard interrupt context as they are very
 *	cheap and don't deliver any packets.  Finally, if there are any data
 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
 *	schedules NAPI, 0 if all new responses were pure.
 *
 *	The caller must ascertain NAPI is not already running.
 */
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
	struct sge_qset *qs = rspq_to_qset(q);
	struct rsp_desc *r = &q->desc[q->cidx];

	if (!is_new_response(r, q))
		return -1;
	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	napi_schedule(&qs->napi);
	return 1;
}
/*
 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
 * (i.e., response queue serviced in hard interrupt).
 */
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct adapter *adap = qs->adap;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);
	if (process_responses(adap, qs, -1) == 0)
		q->unhandled_irqs++;
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);

	if (handle_responses(qs->adap, q) < 0)
		q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
/*
 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
	int new_packets = 0;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	if (process_responses(adap, &adap->sge.qs[0], -1)) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}

	if (adap->params.nports == 2 &&
	    process_responses(adap, &adap->sge.qs[1], -1)) {
		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
			     V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}

	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
static int rspq_check_napi(struct sge_qset *qs)
{
	struct sge_rspq *q = &qs->rspq;

	if (!napi_is_scheduled(&qs->napi) &&
	    is_new_response(&q->desc[q->cidx], q)) {
		napi_schedule(&qs->napi);
		return 1;
	}
	return 0;
}
/*
 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
 * by NAPI polling).  Handles data events from SGE response queues as well as
 * error and other async events as they all use the same MSI vector.  We use
 * one SGE response queue per port in this mode and protect all response
 * queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
	int new_packets;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	new_packets = rspq_check_napi(&adap->sge.qs[0]);
	if (adap->params.nports == 2)
		new_packets += rspq_check_napi(&adap->sge.qs[1]);
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
/*
 * A helper function that processes responses and issues GTS.
 */
static inline int process_responses_gts(struct adapter *adap,
					struct sge_rspq *rq)
{
	int work;

	work = process_responses(adap, rspq_to_qset(rq), -1);
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
	return work;
}
/*
 * The legacy INTx interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same interrupt pin.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr(int irq, void *cookie)
{
	int work_done, w0, w1;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

	spin_lock(&q0->lock);

	w0 = is_new_response(&q0->desc[q0->cidx], q0);
	w1 = adap->params.nports == 2 &&
	     is_new_response(&q1->desc[q1->cidx], q1);

	if (likely(w0 | w1)) {
		t3_write_reg(adap, A_PL_CLI, 0);
		t3_read_reg(adap, A_PL_CLI);	/* flush */

		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		work_done = t3_slow_intr_handler(adap);

	spin_unlock(&q0->lock);
	return IRQ_RETVAL(work_done != 0);
}
/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		process_responses_gts(adap, q0);

	if (map & 2)
		process_responses_gts(adap, &adap->sge.qs[1].rspq);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
/*
 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_qset *qs0 = &adap->sge.qs[0];
	struct sge_rspq *q0 = &qs0->rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		napi_schedule(&qs0->napi);

	if (map & 2)
		napi_schedule(&adap->sge.qs[1].napi);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
/**
 *	t3_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *	@polling: whether using NAPI to service response queues
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 *	response queues.
 */
irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
	if (adap->flags & USING_MSIX)
		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return polling ? t3_intr_msi_napi : t3_intr_msi;
	if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}
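/*
 * A minimal sketch of how a caller might wire up the selected handler
 * (illustrative only; the exact flags and naming live in the driver's top
 * level, not in this file, and "rspq0" stands for adap->sge.qs[0].rspq):
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, rspq0->polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 *
 * With MSI-X, each queue set's vector is requested individually with the
 * queue set itself as the cookie instead of the adapter.
 */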
#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)
/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);

	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
			 status & F_HIPIODRBDROPERR ? "high" : "low");

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}
/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs two tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 */
static void sge_timer_cb(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct adapter *adap = qs->adap;

	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
		spin_unlock(&qs->txq[TXQ_ETH].lock);
	}
	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}
	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
					    &adap->sge.qs[0].rspq.lock;
	if (spin_trylock_irq(lock)) {
		if (!napi_is_scheduled(&qs->napi)) {
			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

			if (qs->fl[0].credits < qs->fl[0].size)
				__refill_fl(adap, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size)
				__refill_fl(adap, &qs->fl[1]);

			if (status & (1 << qs->rspq.cntxt_id)) {
				qs->rspq.starved++;
				if (qs->rspq.credits) {
					refill_rspq(adap, &qs->rspq, 1);
					qs->rspq.credits--;
					qs->rspq.restarted++;
					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
						     1 << qs->rspq.cntxt_id);
				}
			}
		}
		spin_unlock_irq(lock);
	}
	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/**
 *	t3_update_qset_coalesce - update coalescing settings for a queue set
 *	@qs: the SGE queue set
 *	@p: new queue set parameters
 *
 *	Update the coalescing settings for an SGE queue set.  Nothing is done
 *	if the queue set is not initialized yet.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}
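/*
 * The factor of 10 above converts microseconds into the SGE's 0.1us holdoff
 * ticks (see the A_SG_TIMER_TICK setup in t3_sge_init()).  For example, the
 * default coalesce_usecs of 5 becomes a holdoff_tmr of 50 ticks, i.e. the
 * hardware waits at least 5us before raising the next interrupt for the
 * queue.
 */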
/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@adapter: the adapter
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@dev: net device associated with this queue set
 *	@netdevq: net device TX queue associated with this queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev,
		      struct netdev_queue *netdevq)
{
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;

	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size, p->cong_thres, 1,
					  0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}
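/*
 * A minimal sketch of a caller setting up one queue set per net device Tx
 * queue (illustrative only; error handling and the caller's locking are
 * omitted, and "pi" stands for a hypothetical per-port structure holding the
 * first qset index, qset count, and IRQ vector index):
 *
 *	for (j = 0; j < pi->nqsets; ++j) {
 *		err = t3_sge_alloc_qset(adap, pi->first_qset + j, 1,
 *					pi->first_qset + j,
 *					&adap->params.sge.qset[j], ntxq, dev,
 *					netdev_get_tx_queue(dev, j));
 *		if (err)
 *			goto out;
 *	}
 */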
/**
 *	t3_stop_sge_timers - stop SGE timer call backs
 *	@adap: the adapter
 *
 *	Stops each SGE queue set's timer call back.
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
	}
}
/**
 *	t3_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}
/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}
/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait, however the tasklets will have no effect
 *	since the doorbells are disabled and the driver will call this again
 *	later from process context, at which time the tasklets will be stopped
 *	if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}
/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
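/*
 * A note on the timer-tick arithmetic above: core_ticks_per_usec(adap) is the
 * number of core clock ticks per microsecond, so writing one tenth of it to
 * A_SG_TIMER_TICK makes one SGE timer tick equal 0.1us.  That is the unit
 * assumed by all of the holdoff values in this file, e.g. NOMEM_INTR_DELAY
 * and the coalesce_usecs * 10 conversion in t3_update_qset_coalesce().
 */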
/**
 *	t3_sge_prep - one-time SGE initialization
 *	@adap: the associated adapter
 *	@p: SGE parameters
 *
 *	Performs one-time initialization of SGE SW state.  Includes determining
 *	defaults for the assorted SGE parameters, which admins can change until
 *	they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}
/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
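/*
 * Usage sketch for the qnum mapping above (illustrative only):
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(qs, 0, 0, buf);	// qnum 0: Ethernet Tx queue
 *
 * dumps the first Ethernet Tx descriptor into buf and returns its size.
 * qnum 0..2 selects the Ethernet/offload/control Tx rings, qnum 3 the
 * response queue, and qnum 4..5 the two free lists; a negative return means
 * the queue is unallocated or idx is out of range.
 */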